bk://gkernel.bkbits.net/netdev-2.6
jgarzik@pobox.com|ChangeSet|20040707051026|14412 jgarzik
# This is a BitKeeper generated diff -Nru style patch.
#
# ChangeSet
# 2004/07/11 13:38:39-07:00 akpm@bix.(none)
# Merge bix.(none):/usr/src/bk25 into bix.(none):/usr/src/bk-netdev
#
# drivers/net/Kconfig
# 2004/07/11 13:38:36-07:00 akpm@bix.(none) +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/07 01:10:26-04:00 jgarzik@pobox.com
# Merge pobox.com:/spare/repo/netdev-2.6/mv64340
# into pobox.com:/spare/repo/netdev-2.6/ALL
#
# drivers/net/Kconfig
# 2004/07/07 01:10:22-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/07 01:07:19-04:00 hch@lst.de
# [PATCH] allow modular mv64340_eth
#
# Only tested together with some additional patches to make the driver
# work on PPC that need more work, but the changes are obvious.
#
# drivers/net/mv64340_eth.c
# 2004/07/06 08:32:15-04:00 hch@lst.de +31 -9
# allow modular mv64340_eth
#
# drivers/net/Kconfig
# 2004/07/06 08:32:40-04:00 hch@lst.de +1 -1
# allow modular mv64340_eth
#
# ChangeSet
# 2004/07/07 01:01:55-04:00 margitsw@t-online.de
# [PATCH] prism54 freq to channel incorrect for 5GHz
#
# 2004-07-06 Margit Schubert-While
#
# * The frequency to channel conversion is wrong for the 5GHz band
# * Although the (known) devices don't/can't use it,
#   they do report it. (iwlist ethX freq)
#
# drivers/net/wireless/prism54/oid_mgt.c
# 2004/07/06 11:26:44-04:00 margitsw@t-online.de +2 -8
# prism54 freq to channel incorrect for 5GHz
#
# ChangeSet
# 2004/07/05 12:47:32-04:00 jgarzik@pobox.com
# Merge pobox.com:/spare/repo/netdev-2.6/gianfar
# into pobox.com:/spare/repo/netdev-2.6/ALL
#
# drivers/net/Makefile
# 2004/07/05 12:47:29-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# drivers/net/Kconfig
# 2004/07/05 12:47:28-04:00 jgarzik@pobox.com +11 -11
# Auto merged
#
# ChangeSet
# 2004/07/05 12:46:23-04:00 kumar.gala@freescale.com
# [PATCH] add new ethernet driver 'gianfar'
#
# drivers/net/gianfar_phy.h
# 2004/06/28 11:16:57-04:00 kumar.gala@freescale.com +192 -0
# updated gianfar enet 2.6
#
# drivers/net/gianfar_phy.c
# 2004/06/28 11:16:57-04:00 kumar.gala@freescale.com +504 -0
# updated gianfar enet 2.6
#
# drivers/net/gianfar_ethtool.c
# 2004/06/28 11:16:57-04:00 kumar.gala@freescale.com +484 -0
# updated gianfar enet 2.6
#
# drivers/net/gianfar.h
# 2004/06/28 11:16:57-04:00 kumar.gala@freescale.com +537 -0
# updated gianfar enet 2.6
#
# drivers/net/gianfar_phy.h
# 2004/06/28 11:16:57-04:00 kumar.gala@freescale.com +0 -0
# BitKeeper file /spare/repo/netdev-2.6/gianfar/drivers/net/gianfar_phy.h
#
# drivers/net/gianfar_phy.c
# 2004/06/28 11:16:57-04:00 kumar.gala@freescale.com +0 -0
# BitKeeper file /spare/repo/netdev-2.6/gianfar/drivers/net/gianfar_phy.c
#
# drivers/net/gianfar_ethtool.c
# 2004/06/28 11:16:57-04:00 kumar.gala@freescale.com +0 -0
# BitKeeper file /spare/repo/netdev-2.6/gianfar/drivers/net/gianfar_ethtool.c
#
# drivers/net/gianfar.h
# 2004/06/28 11:16:57-04:00 kumar.gala@freescale.com +0 -0
# BitKeeper file /spare/repo/netdev-2.6/gianfar/drivers/net/gianfar.h
#
# drivers/net/gianfar.c
# 2004/06/28 11:16:57-04:00 kumar.gala@freescale.com +1921 -0
# updated gianfar enet 2.6
#
# drivers/net/Makefile
# 2004/06/28 11:16:57-04:00 kumar.gala@freescale.com +1 -0
# updated gianfar enet 2.6
#
# drivers/net/Kconfig
# 2004/06/28 11:16:57-04:00 kumar.gala@freescale.com +11 -0
# updated gianfar enet 2.6
#
# drivers/net/gianfar.c
# 2004/06/28 11:16:57-04:00 kumar.gala@freescale.com +0 -0
# BitKeeper file /spare/repo/netdev-2.6/gianfar/drivers/net/gianfar.c
#
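The mv64340_eth.c side of the "allow modular mv64340_eth" change is not among the hunks below. As a rough, hypothetical sketch (invented names, not hch@lst.de's actual +31/-9 diff): making a built-in driver buildable as a module typically means pairing the registration done at load time with an exit hook, and declaring a license so the module loader accepts it.

        /*
         * Hypothetical sketch only -- not the actual mv64340_eth change.
         * A modular driver needs an exit hook to undo what module load
         * did, plus a license tag for the module loader.
         */
        #include <linux/module.h>
        #include <linux/init.h>

        static int __init mv64340_eth_init_module(void)
        {
                /* probe and register the netdevice(s) here */
                return 0;
        }

        static void __exit mv64340_eth_cleanup_module(void)
        {
                /* unregister netdevice(s), free rings and irqs here */
        }

        module_init(mv64340_eth_init_module);
        module_exit(mv64340_eth_cleanup_module);

        MODULE_LICENSE("GPL");

The matching one-line Kconfig change presumably keeps CONFIG_MV64340_ETH as a tristate, which is what permits the "m" choice; that matches the `tristate "MV-64340 Ethernet support"` entry in the Kconfig hunk later in this patch.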
# ChangeSet
# 2004/07/04 13:40:24-04:00 herbert@gondor.apana.org.au
# [PATCH] Fix successive calls to spin_lock_irqsave in sk98lin
#
# This patch fixes the few places in sk98lin where it calls
# spin_lock_irqsave on the same flags variable, thus causing
# interrupts to be disabled upon leaving the driver.
#
# drivers/net/sk98lin/skge.c
# 2004/07/02 22:07:10-04:00 herbert@gondor.apana.org.au +6 -12
# Re: Resend: [NETDRV] Fix successive calls to spin_lock_irqsave in sk98lin
#
# ChangeSet
# 2004/07/04 13:35:18-04:00 hch@lst.de
# [PATCH] convert skge to pci_driver API (2nd try)
#
# drivers/net/sk98lin/skge.c
# 2004/07/03 16:00:05-04:00 hch@lst.de +314 -410
# convert skge to pci_driver API (2nd try)
#
# drivers/net/sk98lin/h/skdrv2nd.h
# 2004/07/03 15:57:55-04:00 hch@lst.de +0 -54
# convert skge to pci_driver API (2nd try)
#
# ChangeSet
# 2004/07/03 13:27:04-07:00 akpm@bix.(none)
# Merge bk://gkernel.bkbits.net/netdev-2.6
# into bix.(none):/usr/src/bk-netdev
#
# include/linux/pci_ids.h
# 2004/07/03 13:27:00-07:00 akpm@bix.(none) +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/03 02:22:51-04:00 jgarzik@pobox.com
# Merge pobox.com:/spare/repo/netdev-2.6/velocity
# into pobox.com:/spare/repo/netdev-2.6/ALL
#
# include/linux/pci_ids.h
# 2004/07/03 02:22:46-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# drivers/net/Kconfig
# 2004/07/03 02:22:46-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/03 02:21:07-04:00 jgarzik@pobox.com
# Merge pobox.com:/spare/repo/linux-2.6
# into pobox.com:/spare/repo/netdev-2.6/velocity
#
# include/linux/pci_ids.h
# 2004/07/03 02:21:04-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/03 02:20:30-04:00 romieu@fr.zoreil.com
# [PATCH] via-velocity: use common crc16 code for WOL
#
# - use common crc16 code for WOL;
# - remove unused ether_crc.
#
# Signed-off-by: Francois Romieu
#
# drivers/net/via-velocity.c
# 2004/06/30 16:31:05-04:00 romieu@fr.zoreil.com +4 -105
# via-velocity: use common crc16 code for WOL
#
# drivers/net/Kconfig
# 2004/06/30 16:31:05-04:00 romieu@fr.zoreil.com +1 -0
# via-velocity: use common crc16 code for WOL
#
# ChangeSet
# 2004/07/03 02:14:39-04:00 jgarzik@pobox.com
# Merge pobox.com:/spare/repo/netdev-2.6/forcedeth
# into pobox.com:/spare/repo/netdev-2.6/ALL
#
# include/linux/pci_ids.h
# 2004/07/03 02:14:35-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/03 02:13:40-04:00 manfred@colorfullife.com
# [PATCH] Gigabit Ethernet support for forcedeth
#
# - Lots of updates for the gigabit ethernet nic: New ring entry format,
#   support for RGMII phys, new pci ids.
# - Silence interrupt source 0x01: it's an rx error, no need to ask the
#   end user to report it.
# - add support for vlan packets: The NvRegOffloadConfig register contains
#   the maximum packet size; it was set to 1518, which caused vlan to fail.
# - fix bit flags for mii access: the wrong bit was polled and the mii
#   write implementation was just broken.
# - Do not stop the rx/tx engines around mii accesses.
# - reset and reinit the phy during probe.
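The sk98lin fix at the top of this group addresses a classic pattern. A minimal illustration (demo locks, not skge.c code) of why reusing one flags variable across nested spin_lock_irqsave() calls leaves interrupts disabled on exit:

        #include <linux/spinlock.h>

        static spinlock_t lock_a = SPIN_LOCK_UNLOCKED;  /* 2.6.7-era initializers */
        static spinlock_t lock_b = SPIN_LOCK_UNLOCKED;

        static void buggy(void)
        {
                unsigned long flags;

                spin_lock_irqsave(&lock_a, flags);      /* flags := "irqs were on"  */
                spin_lock_irqsave(&lock_b, flags);      /* flags := "irqs were off" */
                /* ... */
                spin_unlock_irqrestore(&lock_b, flags);
                spin_unlock_irqrestore(&lock_a, flags); /* leaves irqs disabled!    */
        }

        static void fixed(void)
        {
                unsigned long flags;

                spin_lock_irqsave(&lock_a, flags);      /* save interrupt state once */
                spin_lock(&lock_b);                     /* irqs are already off here */
                /* ... */
                spin_unlock(&lock_b);
                spin_unlock_irqrestore(&lock_a, flags); /* restores "irqs were on"   */
        }

The second irqsave overwrites the saved state with "interrupts disabled", so the final restore never turns them back on; the +6/-12 diffstat above is consistent with replacing inner irqsave/irqrestore pairs with plain spin_lock/spin_unlock.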
#
# include/linux/pci_ids.h
# 2004/07/01 01:06:37-04:00 manfred@colorfullife.com +13 -0
# Gigabit Ethernet support for forcedeth
#
# drivers/net/forcedeth.c
# 2004/07/01 01:06:52-04:00 manfred@colorfullife.com +620 -228
# Gigabit Ethernet support for forcedeth
#
# ChangeSet
# 2004/07/03 02:11:14-04:00 manfred@colorfullife.com
# [PATCH] natsemi 2: support packets > 1518 bytes
#
# drivers/net/natsemi.c
# 2004/06/25 15:36:12-04:00 manfred@colorfullife.com +110 -25
# natsemi 2: support packets > 1518 bytes
#
# ChangeSet
# 2004/07/03 02:11:05-04:00 manfred@colorfullife.com
# [PATCH] natsemi 1: switch to netdev_priv()
#
# drivers/net/natsemi.c
# 2004/06/25 15:35:12-04:00 manfred@colorfullife.com +48 -48
# natsemi 1: switch to netdev_priv()
#
# ChangeSet
# 2004/07/02 15:22:53-07:00 akpm@bix.(none)
# Merge bk://gkernel.bkbits.net/netdev-2.6
# into bix.(none):/usr/src/bk-netdev
#
# include/linux/pci_ids.h
# 2004/07/02 15:22:50-07:00 akpm@bix.(none) +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/02 15:13:01-04:00 jgarzik@pobox.com
# Merge pobox.com:/spare/repo/netdev-2.6/r8169
# into pobox.com:/spare/repo/netdev-2.6/ALL
#
# drivers/net/Kconfig
# 2004/07/02 15:12:57-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/02 15:08:18-04:00 jgarzik@pobox.com
# Merge pobox.com:/spare/repo/netdev-2.6/mv64340
# into pobox.com:/spare/repo/netdev-2.6/ALL
#
# drivers/net/Kconfig
# 2004/07/02 15:08:13-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/02 15:03:28-04:00 jgarzik@pobox.com
# Merge pobox.com:/spare/repo/linux-2.6
# into pobox.com:/spare/repo/netdev-2.6/via-rhine
#
# drivers/net/via-rhine.c
# 2004/07/02 15:03:24-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/02 15:01:24-04:00 jgarzik@pobox.com
# Merge pobox.com:/spare/repo/linux-2.6
# into pobox.com:/spare/repo/netdev-2.6/velocity
#
# include/linux/pci_ids.h
# 2004/07/02 15:01:20-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/02 14:51:49-04:00 jgarzik@pobox.com
# Merge pobox.com:/spare/repo/linux-2.6
# into pobox.com:/spare/repo/netdev-2.6/r8169
#
# drivers/net/r8169.c
# 2004/07/02 14:51:44-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# drivers/net/Kconfig
# 2004/07/02 14:51:43-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/02 14:48:04-04:00 jgarzik@pobox.com
# Merge pobox.com:/spare/repo/linux-2.6
# into pobox.com:/spare/repo/netdev-2.6/natsemi
#
# drivers/net/natsemi.c
# 2004/07/02 14:47:59-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/02 14:46:02-04:00 jgarzik@pobox.com
# Merge pobox.com:/spare/repo/linux-2.6
# into pobox.com:/spare/repo/netdev-2.6/mv64340
#
# drivers/net/Makefile
# 2004/07/02 14:45:56-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# drivers/net/Kconfig
# 2004/07/02 14:45:56-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/02 14:41:36-04:00 jgarzik@pobox.com
# Merge pobox.com:/spare/repo/linux-2.6
# into pobox.com:/spare/repo/netdev-2.6/epic100
#
# drivers/net/epic100.c
# 2004/07/02 14:41:26-04:00 jgarzik@pobox.com +0 -0
# Auto merged
#
# ChangeSet
# 2004/07/02 14:34:44-04:00 jgarzik@pobox.com
# Hand-merge upstream Kconfig help text.
#
# drivers/net/Kconfig
# 2004/07/02 14:34:35-04:00 jgarzik@pobox.com +17 -0
# Hand-merge upstream Kconfig help text.
#
# ChangeSet
# 2004/06/22 00:37:39-04:00 akpm@osdl.org
# [PATCH] via-velocity warning fixes
#
# With CONFIG_PM=n various functions (such as velocity_set_wol) are unused
# and we get warnings and unused code.
#
# Fix that up by moving the functions so they fall inside the callers' #ifdef
# CONFIG_PM.
#
# Remove now-unneeded forward declarations.
#
# Signed-off-by: Andrew Morton
#
# drivers/net/via-velocity.c
# 2004/06/22 00:30:35-04:00 akpm@osdl.org +94 -98
# via-velocity warning fixes
#
# ChangeSet
# 2004/06/21 22:10:51-04:00 romieu@fr.zoreil.com
# [PATCH] via-velocity: unneeded forward declarations
#
# Removal of unneeded forward declarations.
#
# drivers/net/via-velocity.c
# 2004/06/21 17:46:19-04:00 romieu@fr.zoreil.com +0 -4
# via-velocity: unneeded forward declarations
#
# ChangeSet
# 2004/06/21 22:10:42-04:00 romieu@fr.zoreil.com
# [PATCH] via-velocity: ordering of Rx descriptors operations
#
# Force strict ordering of operations on Rx descriptors to avoid any issue
# related to inline optimization.
#
# drivers/net/via-velocity.c
# 2004/06/21 16:20:35-04:00 romieu@fr.zoreil.com +2 -0
# via-velocity: ordering of Rx descriptors operations
#
# ChangeSet
# 2004/06/21 22:10:33-04:00 romieu@fr.zoreil.com
# [PATCH] via-velocity: Rx copybreak
#
# Handle copybreak.
# - velocity_rx_refill() is modified to allow the processing of a Rx desc
#   ring wherein the empty skb slots are not necessarily contiguous. Given
#   the preceding changes, rx_copybreak should not need anything else;
# - the driver does not rely on rd_info->skb_dma set to NULL any more;
# - pci_dma_sync_single_for_{cpu/device} changes as a bonus;
# - more function documentation.
#
# Some inspiration borrowed from similar r8169 code.
#
# drivers/net/via-velocity.c
# 2004/06/21 15:50:55-04:00 romieu@fr.zoreil.com +72 -19
# via-velocity: Rx copybreak
#
# ChangeSet
# 2004/06/21 22:09:48-04:00 romieu@fr.zoreil.com
# [PATCH] via-velocity: Rx buffers allocation rework
#
# Rework of the Rx buffers allocation:
# - Rx irq handler (velocity_rx_srv): defer the Rx buffer allocation until
#   the packet processing loop is done;
# - a separate index related to the Rx descriptor ("rd_dirty") is introduced
#   to distinguish the first Rx descriptor whose buffer has to be refilled.
#   This way the driver does not need to confuse this descriptor with the
#   most recently netif()ed one. Rationale: batch + rx_copybreak;
# - dirty/empty Rx descriptors are identified through the whole driver
#   via an adequate NULL pointer in the velocity_rd_info[] array (see
#   velocity_rx_refill() and velocity_receive_frame());
# - Rx descriptors need to be grouped by a multiple of 4 before they can
#   be handed back to the asic (hardware constraint). This task is moved
#   from the Rx processing loop to the Rx refill function;
# - factorization of code in velocity_init_rd_ring().
#
# drivers/net/via-velocity.h
# 2004/06/21 15:38:34-04:00 romieu@fr.zoreil.com +2 -1
# via-velocity: Rx buffers allocation rework
#
# drivers/net/via-velocity.c
# 2004/06/21 15:40:41-04:00 romieu@fr.zoreil.com +60 -47
# via-velocity: Rx buffers allocation rework
#
# ChangeSet
# 2004/06/19 17:49:44-04:00 ralf@linux-mips.org
# [PATCH] New driver for MV64340 GigE
#
# The Marvell MV64340 is a system controller with PCI and 3 integrated GigE
# interfaces.
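A condensed sketch of the refill scheme the two via-velocity entries above ("ordering of Rx descriptors operations" and "Rx buffers allocation rework") converge on. All structure, field, and helper names here are illustrative stand-ins, not the driver's code: empty slots are marked by a NULL skb, a dirty index walks them, and descriptors are handed back to the nic only in groups of four, behind a write barrier.

        #include <linux/skbuff.h>
        #include <linux/types.h>
        #include <asm/system.h>

        #define RX_COUNT        64              /* illustrative ring size     */
        #define PKT_BUF_SZ      1536            /* illustrative buffer length */
        #define OWNED_BY_NIC    0x80000000U

        struct rx_state {
                struct sk_buff *skb[RX_COUNT];  /* NULL marks an empty slot   */
                u32 owner[RX_COUNT];            /* stand-in for rdesc0.owner  */
                unsigned int dirty;             /* next slot needing a buffer */
                unsigned int ready;             /* next slot to give the nic  */
                unsigned int filled;            /* refilled, not yet given    */
        };

        static void rx_refill_sketch(struct rx_state *r)
        {
                /* refill every empty slot; empties need not be contiguous */
                while (r->skb[r->dirty] == NULL) {
                        r->skb[r->dirty] = dev_alloc_skb(PKT_BUF_SZ);
                        if (r->skb[r->dirty] == NULL)
                                break;          /* OOM: leave it for a retry  */
                        r->filled++;
                        r->dirty = (r->dirty + 1) % RX_COUNT;
                }

                /* hardware constraint: descriptors go back four at a time */
                while (r->filled >= 4) {
                        int i;

                        wmb();  /* buffer setup visible before ownership flips */
                        for (i = 0; i < 4; i++) {
                                r->owner[r->ready] = OWNED_BY_NIC;
                                r->ready = (r->ready + 1) % RX_COUNT;
                        }
                        r->filled -= 4;
                }
        }

Keeping a leftover count (filled) rather than rounding down on each pass is what lets refills resume cleanly after a partial group or an allocation failure.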
#
# Signed-off-by: Ralf Baechle
#
# drivers/net/mv64340_eth.h
# 2004/06/19 17:14:57-04:00 ralf@linux-mips.org +601 -0
# New driver for MV64340 GigE
#
# drivers/net/mv64340_eth.h
# 2004/06/19 17:14:57-04:00 ralf@linux-mips.org +0 -0
# BitKeeper file /spare/repo/netdev-2.6/mv64340/drivers/net/mv64340_eth.h
#
# drivers/net/mv64340_eth.c
# 2004/06/19 17:14:53-04:00 ralf@linux-mips.org +2702 -0
# New driver for MV64340 GigE
#
# drivers/net/Makefile
# 2004/06/19 17:14:12-04:00 ralf@linux-mips.org +2 -0
# New driver for MV64340 GigE
#
# drivers/net/Kconfig
# 2004/06/19 17:12:58-04:00 ralf@linux-mips.org +28 -0
# New driver for MV64340 GigE
#
# drivers/net/mv64340_eth.c
# 2004/06/19 17:14:53-04:00 ralf@linux-mips.org +0 -0
# BitKeeper file /spare/repo/netdev-2.6/mv64340/drivers/net/mv64340_eth.c
#
# ChangeSet
# 2004/06/19 17:40:00-04:00 akpm@osdl.org
# [PATCH] sis900-fix-phy-transceiver-detection.patch
#
# From: Daniele Venzano
#
# Fix PHY transceiver detection code to fall back to a known PHY and not to
# the last one detected.
#
# The code checks every transceiver detected for link status and type, but
# fails when ghost transceivers are detected, deciding to use the last one
# detected.
#
# With this patch the driver should choose the correct transceiver even when
# some ghosts are detected, by checking the type of the transceiver it is
# going to use.
#
# drivers/net/sis900.c
# 2004/05/19 15:40:44-04:00 akpm@osdl.org +14 -7
# sis900-fix-phy-transceiver-detection.patch
#
# ChangeSet
# 2004/06/19 17:19:33-04:00 rl@hellgate.ch
# [PATCH] Add WOL support
#
# Add rhine_shutdown callback to prepare Rhine power registers for
# shutdown.
#
# Add rhine_get_wol/rhine_set_wol for ethtool ioctl.
#
# Signed-off-by: Roger Luethi
#
# drivers/net/via-rhine.c
# 2004/06/15 12:36:12-04:00 rl@hellgate.ch +119 -4
# Add WOL support
#
# ChangeSet
# 2004/06/19 17:19:25-04:00 rl@hellgate.ch
# [PATCH] Small fixes and clean-up
#
# Bump driver version to mark recent major changes in driver code.
#
# Remove backoff parameter. The reason it was once introduced is gone.
# Continue to go with EEPROM default for now, will hard-wire IEEE backoff
# algorithm instead (later).
#
# Rhine-I needs extra time to recuperate from chip reset before EEPROM
# reload.
#
# Add Rhine model identification.
#
# Signed-off-by: Roger Luethi
#
# drivers/net/via-rhine.c
# 2004/06/18 20:00:00-04:00 rl@hellgate.ch +61 -49
# Small fixes and clean-up
#
# ChangeSet
# 2004/06/19 17:19:16-04:00 rl@hellgate.ch
# [PATCH] Media mode rewrite
#
# Remove rhine_check_duplex, rhine_timer and related data structures
#
# Add rhine_check_media: wrapper for generic mii_check_media, sets duplex
# bit in MAC
#
# Add rhine_enable_linkmon, rhine_disable_linkmon to enable hardware link
# status monitoring
#
# Update mdio_read, mdio_write accordingly
#
# Remove get_intr_status check in rhine_start_tx because we are not racing
# anymore
#
# Signed-off-by: Roger Luethi
#
# drivers/net/via-rhine.c
# 2004/06/18 20:00:00-04:00 rl@hellgate.ch +70 -124
# Media mode rewrite
#
# ChangeSet
# 2004/06/19 17:19:08-04:00 rl@hellgate.ch
# [PATCH] Fix Tx engine race for good
#
# It finally dawned on me how to eliminate the race I've been narrowing
# down with earlier patches: Instead of writing the command registers as
# one word, write them one at a time (as bytes). The race was for setting
# bits in ChipCmd and ChipCmd1 (0x09) against the chip clearing CmdTxOn,
# which is in ChipCmd.
#
# In addition to writing single bytes, the fix requires a switch from
# using bit 5 in ChipCmd0 to bit 5 in ChipCmd1 (which is equivalent)
# to signal Tx demand.
#
# Also, don't restart the Rx engine "pre-emptively" in rhine_rx, that's a
# sure way to race with the chip.
#
# Introduce RHINE_WAIT_FOR, a macro for small busy loops with primitive
# completion checking.
#
# Signed-off-by: Roger Luethi
#
# drivers/net/via-rhine.c
# 2004/06/18 20:00:00-04:00 rl@hellgate.ch +45 -42
# Fix Tx engine race for good
#
# ChangeSet
# 2004/06/19 17:18:59-04:00 rl@hellgate.ch
# [PATCH] Remove options, full_duplex parameters
#
# Nobody complained although media locking parameters were broken
# forever. They were sort of fixed recently, but the code is still in a
# bad shape.
#
# More seriously, the options/full_duplex stuff has fundamental design
# problems that have been discussed in-depth on the list (e.g. effect of
# hotplugging, nameif, suspend/resume).
#
# For those needing media locking with Linux 2.6+ via-rhine, ethtool(8)
# is the replacement.
#
# Signed-off-by: Roger Luethi
#
# drivers/net/via-rhine.c
# 2004/06/18 20:00:00-04:00 rl@hellgate.ch +5 -52
# Remove options, full_duplex parameters
#
# ChangeSet
# 2004/06/19 17:18:51-04:00 rl@hellgate.ch
# [PATCH] Rewrite PHY detection
#
# Instead of probing, set phy_id to 1 for Rhine III and read phy_id from
# the EEPROM-controlled register for Rhine I/II.
#
# Remove code for handling anything other than 1 MII PHY. If it wasn't
# unnecessary code to begin with, it would need to be fixed because it
# wouldn't work.
#
# Use mii_if_info.phy_id as the only container of phy_id. Not particularly
# happy about those names (phy_id vs. MII_PHYSIDx), but being consistent
# about it mitigates confusion.
#
# Signed-off-by: Roger Luethi
#
# drivers/net/via-rhine.c
# 2004/06/18 20:00:00-04:00 rl@hellgate.ch +25 -27
# Rewrite PHY detection
#
# ChangeSet
# 2004/06/19 17:18:42-04:00 rl@hellgate.ch
# [PATCH] Remove lingering PHY special casing
#
# All this code is broken (e.g. unconditionally programs all PHYs as if
# they were the same model) and/or unused (IntrLinkChange never occurs
# with the driver as is).
#
# Signed-off-by: Roger Luethi
#
# drivers/net/via-rhine.c
# 2004/06/18 20:00:00-04:00 rl@hellgate.ch +2 -13
# Remove lingering PHY special casing
#
# ChangeSet
# 2004/06/19 17:18:33-04:00 rl@hellgate.ch
# [PATCH] fix mc_filter on big-endian arch
#
# A.J. from VIA Networking Technologies noticed that via-rhine is using
# cpu_to_le32() when preparing mc_filter hashes. This breaks Rhine hardware
# multicast filters on big-endian architectures.
#
# Signed-off-by: Roger Luethi
#
# drivers/net/via-rhine.c
# 2004/06/06 12:05:22-04:00 rl@hellgate.ch +1 -1
# fix mc_filter on big-endian arch
#
# ChangeSet
# 2004/06/19 17:18:25-04:00 rl@hellgate.ch
# [PATCH] Restructure reset code
#
# Restructure code to make it easier to maintain.
#
# rhine_hw_init: resets chip, reloads eeprom
# rhine_chip_reset: chip reset + what used to be wait_for_reset
# rhine_reload_eeprom: reload eeprom, re-enable MMIO, disable
# EEPROM-controlled WOL
#
# Note: Chip reset clears a bunch of registers that should be reloaded
# from EEPROM (which turns off MMIO). Deal with that later.
#
# Signed-off-by: Roger Luethi
#
# drivers/net/via-rhine.c
# 2004/06/18 20:00:00-04:00 rl@hellgate.ch +58 -57
# Restructure reset code
#
# ChangeSet
# 2004/06/19 17:06:39-04:00 akpm@osdl.org
# [PATCH] fix via-velocity oopses
#
# - Don't register the inet_addr notifier if the hardware is absent. It
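The one-liner behind "fix mc_filter on big-endian arch" above is worth spelling out. A sketch of the hash setup (helper shape and register offsets illustrative, not via-rhine.c verbatim): the 64-bit multicast hash is built in two CPU-order u32s, and the MMIO accessor already byteswaps on the way out, so swapping the table itself a second time with cpu_to_le32() sets the wrong filter bits on big-endian machines.

        #include <linux/netdevice.h>
        #include <linux/crc32.h>
        #include <asm/io.h>

        static void rhine_set_hash_sketch(struct net_device *dev, void *ioaddr)
        {
                u32 mc_filter[2] = { 0, 0 };    /* kept in CPU byte order */
                struct dev_mc_list *mclist;

                for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
                        int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

                        mc_filter[bit >> 5] |= 1 << (bit & 31);        /* right */
                        /* ... |= cpu_to_le32(1 << (bit & 31)) double-swaps on
                         * big-endian: wrong */
                }
                /* writel() performs the cpu-to-little-endian conversion itself */
                writel(mc_filter[0], ioaddr + 0x10);    /* filter word 0 */
                writel(mc_filter[1], ioaddr + 0x14);    /* filter word 1 */
        }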
# oopses when other interfaces are being upped.
#
# - Mark velocity_remove1() as __devexit_p in the pci_driver table.
#
# - c99ification.
#
# Signed-off-by: Andrew Morton
#
# drivers/net/via-velocity.c
# 2004/06/03 02:14:38-04:00 akpm@osdl.org +18 -11
# fix via-velocity oopses
#
# ChangeSet
# 2004/06/19 17:02:30-04:00 romieu@fr.zoreil.com
# [PATCH] via-velocity: velocity_receive_frame diets
#
# Weight loss in velocity_receive_frame():
# - isolate the ip header alignment task from velocity_receive_frame();
# - following p.30 of the datasheet, rdesc0.len includes the CRC length:
#   the amount of data copied during the ip alignment can be shortened.
#
# drivers/net/via-velocity.c
# 2004/06/18 15:58:58-04:00 romieu@fr.zoreil.com +26 -10
# via-velocity: velocity_receive_frame diets
#
# ChangeSet
# 2004/06/19 17:02:22-04:00 romieu@fr.zoreil.com
# [PATCH] via-velocity: uniformize use of OWNED_BY_NIC
#
# Introduce velocity_give_rx_desc() to uniformize the use of OWNED_BY_NIC
# through the driver.
#
# drivers/net/via-velocity.c
# 2004/06/18 15:34:14-04:00 romieu@fr.zoreil.com +9 -4
# via-velocity: uniformize use of OWNED_BY_NIC
#
# ChangeSet
# 2004/06/19 17:02:13-04:00 romieu@fr.zoreil.com
# [PATCH] via-velocity: PCI ID move
#
# (Note: this series requires a -mm based kernel as the via-velocity patches
# are not included in Jeff's -netdev patches).
#
# PCI ID moved from the driver to the kernel database. A change for the
# pci.ids file project at sourceforge has been submitted as well.
#
# include/linux/pci_ids.h
# 2004/06/18 15:24:53-04:00 romieu@fr.zoreil.com +1 -0
# via-velocity: PCI ID move
#
# drivers/net/via-velocity.h
# 2004/06/18 15:32:00-04:00 romieu@fr.zoreil.com +0 -1
# via-velocity: PCI ID move
#
# drivers/net/via-velocity.c
# 2004/06/18 15:32:01-04:00 romieu@fr.zoreil.com +3 -2
# via-velocity: PCI ID move
#
# ChangeSet
# 2004/06/19 16:45:06-04:00 manfred@colorfullife.com
# [PATCH] natsemi updates
#
# - support for external phys, both fibre and twisted pair, added:
#   * remove the "phy" parameter from mdio_{read,write}: the
#     function accesses the current phy.
#   * new functions to access external phys: miiport_{read,write}
#   * scan for external phys on _probe.
#   * ethtool support for switching between internal
#     and external phys.
#   * introduce an init_phy_fixup helper: a few settings must be
#     reapplied after reenabling the internal phy.
# - move register_netdev to the end of _probe. The current position
#   could cause races with hotplug.
# - do not wait for autonegotiation to complete after initialization.
# - use pci_name() instead of dev->name until register_netdev has
#   initialized dev->name.
# - read the BMSR register in the link beat interrupt twice:
#   The link status field is latched; without reading twice, a link up
#   event will be missed (and only noticed a few seconds later in the
#   media timer).
# - restart the autonegotiation after modifying the capabilities.
#
# drivers/net/natsemi.c
# 2004/06/19 13:03:58-04:00 manfred@colorfullife.com +678 -183
# natsemi updates
#
# ChangeSet
# 2004/06/10 20:12:44-04:00 scott.feldman@intel.com
# [PATCH] e100: use NAPI mode all the time
#
# I see no reason to keep the non-NAPI option for e100. This patch removes
# the CONFIG_E100_NAPI option and puts the driver in NAPI mode all the time.
# Matches the way tg3 works.
#
# Unless someone has a really good reason to keep the non-NAPI mode, this
# should go in for 2.6.7.
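The latched-BMSR point in the natsemi entry above is a standard MII idiom. A sketch using the post-patch accessor shape (mdio_read() with no phy argument, as the changelog describes; the accessor itself is driver-private): the link bit is latched low, so it stays 0 after any past link loss until read once, and only the second read reports the live state.

        #include <linux/mii.h>
        #include <linux/netdevice.h>

        int mdio_read(struct net_device *dev, int reg);  /* driver's accessor */

        static int link_is_up(struct net_device *dev)
        {
                int bmsr;

                mdio_read(dev, MII_BMSR);               /* flush the stale latch */
                bmsr = mdio_read(dev, MII_BMSR);        /* now the live status   */

                return (bmsr & BMSR_LSTATUS) != 0;
        }

Reading once in the link beat interrupt would report "down" for a link that just came up, which is exactly the missed event the changelog mentions.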
#
# -scott
#
# drivers/net/e100.c
# 2004/06/04 19:02:04-04:00 scott.feldman@intel.com +5 -24
# e100: use NAPI mode all the time
#
# drivers/net/Kconfig
# 2004/06/04 19:02:34-04:00 scott.feldman@intel.com +0 -4
# e100: use NAPI mode all the time
#
# ChangeSet
# 2004/06/02 20:03:26-04:00 romieu@fr.zoreil.com
# [PATCH] r8169: tx lock removal
#
# spinlock removal crusade.
#
# The patch rephrases the spin_lock_irq() in rtl8169_start_xmit() and its
# companion in the Tx irq handling path in terms of ordered operations.
#
# drivers/net/r8169.c
# 2004/06/02 18:15:37-04:00 romieu@fr.zoreil.com +10 -9
# r8169: tx lock removal
#
# ChangeSet
# 2004/06/02 20:03:18-04:00 romieu@fr.zoreil.com
# [PATCH] r8169: gcc bug workaround
#
# Add a temporary variable to work around a gcc 2.95.3 bug.
#
# drivers/net/r8169.c
# 2004/06/02 18:15:34-04:00 romieu@fr.zoreil.com +5 -2
# r8169: gcc bug workaround
#
# ChangeSet
# 2004/06/02 20:03:10-04:00 romieu@fr.zoreil.com
# [PATCH] r8169: initial link setup rework
#
# Use rtl8169_set_speed() for link setup in rtl8169_init_one():
# - the code which handles the option checking is isolated;
# - display (once) a notice message about the deprecated interface;
# - rtl8169_open() enables the phy timer if the link is not up;
# - rtl8169_set_speed() checks that the netdevice is actually ready
#   in order to activate the timer.
#
# drivers/net/r8169.c
# 2004/06/02 17:18:02-04:00 romieu@fr.zoreil.com +43 -93
# r8169: initial link setup rework
#
# ChangeSet
# 2004/06/02 20:03:02-04:00 romieu@fr.zoreil.com
# [PATCH] r8169: link handling and phy reset rework
#
# Link handling changes (Andy Lutomirski):
# - removed rtl8169_hw_phy_reset() and its busy loop;
# - RTL8169_PHY_TIMEOUT is x10 to account for the removal of the
#   phy_link_down_cnt loop in rtl8169_phy_timer();
# - added spinlocking in timer context for rtl8169_phy_timer to avoid
#   messing with the {set/get}_settings commands issued via ethtool;
# - more TBI stuff.
#
# This patch differs from the former version on the following points:
# - the LinkChg irq does not enable the phy timer when the link goes
#   down any more;
# - the phy timer is not enabled in rtl8169_set_speed();
# - removal of the initial renegotiation hack.
#
# drivers/net/r8169.c
# 2004/06/02 17:17:50-04:00 romieu@fr.zoreil.com +94 -50
# r8169: link handling and phy reset rework
#
# ChangeSet
# 2004/05/27 13:58:44-04:00 romieu@fr.zoreil.com
# [PATCH] r8169: ethtool .get_{settings/link}
#
# - ethtool get_settings() for r8169 (Andy Lutomirski);
# - rtl8169_ethtool_ops.get_drvinfo() is set as well;
# - added some bits to handle the TBI status.
#
# The locking does not need to be specially clever.
#
# drivers/net/r8169.c
# 2004/05/20 07:15:59-04:00 romieu@fr.zoreil.com +87 -0
# r8169: ethtool .get_{settings/link}
#
# ChangeSet
# 2004/05/27 13:58:35-04:00 romieu@fr.zoreil.com
# [PATCH] r8169: ethtool .set_settings
#
# ethtool set_settings support (Andy Lutomirski).
# The initial code has been modified so that the settings of parameters
# for TBI and normal mode do not step on each other's shoes.
#
# drivers/net/r8169.c
# 2004/05/20 07:03:25-04:00 romieu@fr.zoreil.com +97 -0
# r8169: ethtool .set_settings
#
# ChangeSet
# 2004/05/17 14:07:14-04:00 romieu@fr.zoreil.com
# [PATCH] r8169: janitoring
#
# Spring cleanup:
# - unsigned int (u32) should be slightly faster on ppc64 (Jon D Mason);
# - misc minor de-uglyfication.
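The "tx lock removal" entry above replaces a lock with ordering. A simplified, hypothetical sketch of the idea (invented names and layout, not r8169's actual code): the producer publishes the descriptor before flipping ownership and before advancing its index, and the consumer pairs the barriers before reading.

        #include <linux/types.h>
        #include <asm/system.h>
        #include <asm/byteorder.h>

        #define NUM_TX_DESC     64
        #define DescOwn         0x80000000U     /* nic owns the descriptor */

        struct tx_state {
                unsigned int cur_tx;            /* producer index (start_xmit) */
                unsigned int dirty_tx;          /* consumer index (irq side)   */
                u32 status[NUM_TX_DESC];        /* little-endian status words  */
        };

        static void xmit_side(struct tx_state *tp, unsigned int entry)
        {
                /* ...fill buffer address and length for 'entry' here... */
                wmb();                          /* descriptor before DescOwn   */
                tp->status[entry] |= cpu_to_le32(DescOwn);
                tp->cur_tx++;
                smp_wmb();                      /* cur_tx after DescOwn write  */
        }

        static void irq_side(struct tx_state *tp)
        {
                unsigned int dirty = tp->dirty_tx;

                smp_rmb();                      /* pairs with xmit_side()      */
                while (dirty != tp->cur_tx &&
                       !(le32_to_cpu(tp->status[dirty % NUM_TX_DESC]) & DescOwn))
                        dirty++;                /* reclaim completed entries   */
                tp->dirty_tx = dirty;
        }

With dev->xmit_lock already serializing producers (per the SMP notes in the forcedeth hunk below, the same assumption holds for most 2.6 netdrivers), ordered operations are all that is left to coordinate with the irq handler.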
#
# drivers/net/r8169.c
# 2004/05/14 17:08:45-04:00 romieu@fr.zoreil.com +47 -47
# 2.6.6-mm2 - r8169 janitoring
#
# ChangeSet
# 2004/05/17 14:07:06-04:00 romieu@fr.zoreil.com
# [PATCH] r8169: cosmetic renaming of a register
#
# RxUnderrun status bit renamed to LinkChg (identical to the 8139cp driver).
#
# drivers/net/r8169.c
# 2004/05/14 17:08:47-04:00 romieu@fr.zoreil.com +5 -5
# 2.6.6-mm2 - r8169 register rename
#
# ChangeSet
# 2004/05/17 14:06:58-04:00 romieu@fr.zoreil.com
# [PATCH] r8169: napi support
#
# Napi for r8169 (Jon D Mason).
# Both Tx and Rx processing are moved to the ->poll() function.
#
# drivers/net/r8169.c
# 2004/05/14 17:07:18-04:00 romieu@fr.zoreil.com +78 -11
# 2.6.6-mm2 - r8169 napi
#
# drivers/net/Kconfig
# 2004/05/14 17:07:18-04:00 romieu@fr.zoreil.com +5 -0
# 2.6.6-mm2 - r8169 napi
#
# ChangeSet
# 2004/04/21 23:30:47-04:00 romieu@fr.zoreil.com
# [PATCH] epic100: code removal in irq handler
#
# The loop in the irq handler is not needed any more as the high frequency
# events have been deferred due to napi usage.
#
# drivers/net/epic100.c
# 2004/04/19 18:37:16-04:00 romieu@fr.zoreil.com +52 -64
# 2.6.6-rc1-mm1 - code removal in the epic100 irq handler
#
# ChangeSet
# 2004/04/21 23:30:40-04:00 romieu@fr.zoreil.com
# [PATCH] epic100: spin_unlock_irqrestore avoidance
#
# This patch avoids duplicating a spin_unlock:
# - it is mostly an artifact due to wild goto;
# - it makes the generated code smaller.
#
# drivers/net/epic100.c
# 2004/04/19 18:37:05-04:00 romieu@fr.zoreil.com +16 -10
# 2.6.6-rc1-mm1 - spin_unlock_irqrestore avoidance in epic100
#
# ChangeSet
# 2004/03/25 23:52:21-05:00 romieu@fr.zoreil.com
# [netdrvr epic100] napi fixes
#
# Multiple invocations of __netif_rx_schedule() in epic_interrupt() while
# epic_poll loops over __netif_rx_complete() lead to a serious device
# refcount leak.
#
# drivers/net/epic100.c
# 2004/03/25 23:52:16-05:00 romieu@fr.zoreil.com +18 -15
# [netdrvr epic100] napi fixes
#
# Multiple invocations of __netif_rx_schedule() in epic_interrupt() while
# epic_poll loops over __netif_rx_complete() lead to a serious device
# refcount leak.
#
# ChangeSet
# 2004/03/22 19:07:18-05:00 romieu@fr.zoreil.com
# [netdrvr epic100] napi 3/3 - transmit path
#
# drivers/net/epic100.c
# 2004/03/22 18:18:40-05:00 romieu@fr.zoreil.com +9 -11
# 2.6.5-rc2 - epic100 napi
#
# ChangeSet
# 2004/03/22 19:07:11-05:00 romieu@fr.zoreil.com
# [netdrvr epic100] napi 2/3 - receive path
#
# drivers/net/epic100.c
# 2004/03/22 18:18:33-05:00 romieu@fr.zoreil.com +116 -21
# 2.6.5-rc2 - epic100 napi
#
# ChangeSet
# 2004/03/22 19:07:03-05:00 romieu@fr.zoreil.com
# [netdrvr epic100] napi 1/3 - just shuffle some code around
#
# Isolate the classical TX part of epic_interrupt. Innocent code shuffling.
#
# drivers/net/epic100.c
# 2004/03/22 16:53:18-05:00 romieu@fr.zoreil.com +76 -61
# 2.6.5-rc2 - epic100 napi
#
# ChangeSet
# 2004/03/22 19:06:56-05:00 romieu@fr.zoreil.com
# [netdrvr epic100] minor cleanups
#
# - extra pci_disable_device() to balance invocation of pci_enable_device()
#   in epic_init_one() (-> error path + epic_remove_one());
# - lazy return status in epic_init_one(), tsss...;
# - memory dedicated to Rx descriptors was not freed after failure of
#   register_netdev() in epic_init_one();
# - use of epic_pause() in epic_close() offers a small window for a late
#   interruption just before the final free_irq(). Let's close the window to
#   avoid two epic_rx() threads racing with each other.
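The epic100 napi fixes above boil down to making scheduling single-shot. Condensed from the epic100 hunks later in this patch (types and helpers are the driver's own, error paths elided): the interrupt side may only take one poll reference, won via netif_rx_schedule_prep(); if a poll is already in flight, it records a rerun request instead of scheduling again.

        #include <linux/netdevice.h>
        #include <linux/interrupt.h>

        static irqreturn_t epic_irq_sketch(struct net_device *dev,
                                           struct epic_private *ep)
        {
                spin_lock(&ep->napi_lock);
                if (netif_rx_schedule_prep(dev)) {
                        epic_napi_irq_off(dev, ep);     /* mask NAPI sources      */
                        __netif_rx_schedule(dev);       /* exactly one reference  */
                } else {
                        ep->reschedule_in_poll++;       /* poll() loops once more */
                }
                spin_unlock(&ep->napi_lock);
                return IRQ_HANDLED;
        }

epic_poll() then consumes reschedule_in_poll under the same lock before calling __netif_rx_complete(), which is what closes the schedule/complete race that leaked device refcounts.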
# # drivers/net/epic100.c # 2004/03/22 16:53:16-05:00 romieu@fr.zoreil.com +40 -19 # 2.6.5-rc2 - epic100 fixup # diff -Nru a/drivers/net/Kconfig b/drivers/net/Kconfig --- a/drivers/net/Kconfig 2004-07-13 12:46:19 -07:00 +++ b/drivers/net/Kconfig 2004-07-13 12:46:19 -07:00 @@ -1746,6 +1746,7 @@ tristate "VIA Velocity support" depends on NET_PCI && PCI select CRC32 + select CRC16 select MII help If you have a VIA "Velocity" based network card say Y here. @@ -2043,6 +2044,11 @@ To compile this driver as a module, choose M here: the module will be called r8169. This is recommended. +config R8169_NAPI + bool "Use Rx and Tx Polling (NAPI) (EXPERIMENTAL)" + depends on R8169 && EXPERIMENTAL + + config SK98LIN tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support" depends on PCI @@ -2130,6 +2136,45 @@ To compile this driver as a module, choose M here: the module will be called tg3. This is recommended. + +config MV64340_ETH + tristate "MV-64340 Ethernet support" + depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX + help + This driver supports the gigabit Ethernet on the Marvell MV64340 + chipset which is used in the Momenco Ocelot C and Jaguar ATX. + +config MV64340_ETH_0 + bool "MV-64340 Port 0" + depends on MV64340_ETH + help + This enables support for Port 0 of the Marvell MV64340 Gigabit + Ethernet. + +config MV64340_ETH_1 + bool "MV-64340 Port 1" + depends on MV64340_ETH + help + This enables support for Port 1 of the Marvell MV64340 Gigabit + Ethernet. + +config MV64340_ETH_2 + bool "MV-64340 Port 2" + depends on MV64340_ETH + help + This enables support for Port 2 of the Marvell MV64340 Gigabit + Ethernet. + +config GIANFAR + tristate "Gianfar Ethernet" + depends on 85xx + help + This driver supports the Gigabit TSEC on the MPC85xx + family of chips, and the FEC on the 8540 + +config GFAR_NAPI + bool "NAPI Support" + depends on GIANFAR endmenu diff -Nru a/drivers/net/Makefile b/drivers/net/Makefile --- a/drivers/net/Makefile 2004-07-13 12:46:19 -07:00 +++ b/drivers/net/Makefile 2004-07-13 12:46:19 -07:00 @@ -10,6 +10,7 @@ obj-$(CONFIG_IBM_EMAC) += ibm_emac/ obj-$(CONFIG_IXGB) += ixgb/ obj-$(CONFIG_BONDING) += bonding/ +obj-$(CONFIG_GIANFAR) += gianfar.o gianfar_ethtool.o gianfar_phy.o # # link order important here @@ -94,6 +95,8 @@ obj-$(CONFIG_B44) += b44.o obj-$(CONFIG_FORCEDETH) += forcedeth.o obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o + +obj-$(CONFIG_MV64340_ETH) += mv64340_eth.o obj-$(CONFIG_PPP) += ppp_generic.o slhc.o obj-$(CONFIG_PPP_ASYNC) += ppp_async.o diff -Nru a/drivers/net/e100.c b/drivers/net/e100.c --- a/drivers/net/e100.c 2004-07-13 12:46:19 -07:00 +++ b/drivers/net/e100.c 2004-07-13 12:46:19 -07:00 @@ -87,9 +87,8 @@ * cb_to_use is the next CB to use for queuing a command; cb_to_clean * is the next CB to check for completion; cb_to_send is the first * CB to start on in case of a previous failure to resume. CB clean - * up happens in interrupt context in response to a CU interrupt, or - * in dev->poll in the case where NAPI is enabled. cbs_avail keeps - * track of number of free CB resources available. + * up happens in interrupt context in response to a CU interrupt. + * cbs_avail keeps track of number of free CB resources available. * * Hardware padding of short packets to minimum packet size is * enabled. 82557 pads with 7Eh, while the later controllers pad @@ -112,9 +111,8 @@ * replacement RFDs cannot be allocated, or the RU goes non-active, * the RU must be restarted. 
Frame arrival generates an interrupt, * and Rx indication and re-allocation happen in the same context, - * therefore no locking is required. If NAPI is enabled, this work - * happens in dev->poll. A software-generated interrupt is gen- - * erated from the watchdog to recover from a failed allocation + * therefore no locking is required. A software-generated interrupt + * is generated from the watchdog to recover from a failed allocation * senario where all Rx resources have been indicated and none re- * placed. * @@ -126,8 +124,6 @@ * supported. Tx Scatter/Gather is not supported. Jumbo Frames is * not supported (hardware limitation). * - * NAPI support is enabled with CONFIG_E100_NAPI. - * * MagicPacket(tm) WoL support is enabled/disabled via ethtool. * * Thanks to JC (jchapman@katalix.com) for helping with @@ -158,7 +154,7 @@ #define DRV_NAME "e100" -#define DRV_VERSION "3.0.18" +#define DRV_VERSION "3.0.22-NAPI" #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" #define DRV_COPYRIGHT "Copyright(c) 1999-2004 Intel Corporation" #define PFX DRV_NAME ": " @@ -1463,11 +1459,7 @@ nic->net_stats.rx_packets++; nic->net_stats.rx_bytes += actual_size; nic->netdev->last_rx = jiffies; -#ifdef CONFIG_E100_NAPI netif_receive_skb(skb); -#else - netif_rx(skb); -#endif if(work_done) (*work_done)++; } @@ -1562,20 +1554,12 @@ if(stat_ack & stat_ack_rnr) nic->ru_running = 0; -#ifdef CONFIG_E100_NAPI e100_disable_irq(nic); netif_rx_schedule(netdev); -#else - if(stat_ack & stat_ack_rx) - e100_rx_clean(nic, NULL, 0); - if(stat_ack & stat_ack_tx) - e100_tx_clean(nic); -#endif return IRQ_HANDLED; } -#ifdef CONFIG_E100_NAPI static int e100_poll(struct net_device *netdev, int *budget) { struct nic *nic = netdev_priv(netdev); @@ -1598,7 +1582,6 @@ return 1; } -#endif #ifdef CONFIG_NET_POLL_CONTROLLER static void e100_netpoll(struct net_device *netdev) @@ -2135,10 +2118,8 @@ SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops); netdev->tx_timeout = e100_tx_timeout; netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; -#ifdef CONFIG_E100_NAPI netdev->poll = e100_poll; netdev->weight = E100_NAPI_WEIGHT; -#endif #ifdef CONFIG_NET_POLL_CONTROLLER netdev->poll_controller = e100_netpoll; #endif diff -Nru a/drivers/net/epic100.c b/drivers/net/epic100.c --- a/drivers/net/epic100.c 2004-07-13 12:46:19 -07:00 +++ b/drivers/net/epic100.c 2004-07-13 12:46:19 -07:00 @@ -80,8 +80,6 @@ These may be modified when a driver module is loaded.*/ static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ -/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ -static int max_interrupt_work = 32; /* Used to pass the full-duplex flag, etc. */ #define MAX_UNITS 8 /* More are supported, limit only on options */ @@ -99,9 +97,9 @@ Making the Tx ring too large decreases the effectiveness of channel bonding and packet priority. There are no ill effects from too-large receive rings. */ -#define TX_RING_SIZE 16 -#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ -#define RX_RING_SIZE 32 +#define TX_RING_SIZE 256 +#define TX_QUEUE_LEN 240 /* Limit ring entries actually used. 
*/ +#define RX_RING_SIZE 256 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc) #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc) @@ -152,12 +150,10 @@ MODULE_LICENSE("GPL"); MODULE_PARM(debug, "i"); -MODULE_PARM(max_interrupt_work, "i"); MODULE_PARM(rx_copybreak, "i"); MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i"); MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i"); MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)"); -MODULE_PARM_DESC(max_interrupt_work, "EPIC/100 maximum events handled per interrupt"); MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex"); MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames"); MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)"); @@ -289,6 +285,12 @@ StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80, }; +#define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */ + +#define EpicNapiEvent (TxEmpty | TxDone | \ + RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull) +#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent) + static u16 media2miictl[16] = { 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; @@ -327,9 +329,12 @@ /* Ring pointers. */ spinlock_t lock; /* Group with Tx control cache line. */ + spinlock_t napi_lock; + unsigned int reschedule_in_poll; unsigned int cur_tx, dirty_tx; unsigned int cur_rx, dirty_rx; + u32 irq_mask; unsigned int rx_buf_sz; /* Based on MTU+slack. */ struct pci_dev *pci_dev; /* PCI bus location. */ @@ -356,7 +361,8 @@ static void epic_tx_timeout(struct net_device *dev); static void epic_init_ring(struct net_device *dev); static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev); -static int epic_rx(struct net_device *dev); +static int epic_rx(struct net_device *dev, int budget); +static int epic_poll(struct net_device *dev, int *budget); static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static struct ethtool_ops netdev_ethtool_ops; @@ -375,7 +381,7 @@ int irq; struct net_device *dev; struct epic_private *ep; - int i, option = 0, duplex = 0; + int i, ret, option = 0, duplex = 0; void *ring_space; dma_addr_t ring_dma; @@ -389,29 +395,33 @@ card_idx++; - i = pci_enable_device(pdev); - if (i) - return i; + ret = pci_enable_device(pdev); + if (ret) + goto out; irq = pdev->irq; if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) { printk (KERN_ERR "card %d: no PCI region space\n", card_idx); - return -ENODEV; + ret = -ENODEV; + goto err_out_disable; } pci_set_master(pdev); + ret = pci_request_regions(pdev, DRV_NAME); + if (ret < 0) + goto err_out_disable; + + ret = -ENOMEM; + dev = alloc_etherdev(sizeof (*ep)); if (!dev) { printk (KERN_ERR "card %d: no memory for eth device\n", card_idx); - return -ENOMEM; + goto err_out_free_res; } SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); - if (pci_request_regions(pdev, DRV_NAME)) - goto err_out_free_netdev; - #ifdef USE_IO_OPS ioaddr = pci_resource_start (pdev, 0); #else @@ -419,7 +429,7 @@ ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1)); if (!ioaddr) { printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx); - goto err_out_free_res; + goto err_out_free_netdev; } #endif @@ -456,7 +466,9 @@ dev->base_addr = ioaddr; dev->irq = irq; - spin_lock_init (&ep->lock); + spin_lock_init(&ep->lock); + spin_lock_init(&ep->napi_lock); + ep->reschedule_in_poll = 0; /* Bring the chip out 
of low-power mode. */ outl(0x4200, ioaddr + GENCTL); @@ -486,6 +498,9 @@ ep->pci_dev = pdev; ep->chip_id = chip_idx; ep->chip_flags = pci_id_tbl[chip_idx].drv_flags; + ep->irq_mask = + (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) + | CntFull | TxUnderrun | EpicNapiEvent; /* Find the connected MII xcvrs. Doing this in open() would allow detecting external xcvrs later, but @@ -540,10 +555,12 @@ dev->ethtool_ops = &netdev_ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->tx_timeout = &epic_tx_timeout; + dev->poll = epic_poll; + dev->weight = 64; - i = register_netdev(dev); - if (i) - goto err_out_unmap_tx; + ret = register_netdev(dev); + if (ret < 0) + goto err_out_unmap_rx; printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ", dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq); @@ -551,19 +568,24 @@ printk("%2.2x:", dev->dev_addr[i]); printk("%2.2x.\n", dev->dev_addr[i]); - return 0; +out: + return ret; +err_out_unmap_rx: + pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); err_out_unmap_tx: pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); err_out_iounmap: #ifndef USE_IO_OPS iounmap(ioaddr); -err_out_free_res: -#endif - pci_release_regions(pdev); err_out_free_netdev: +#endif free_netdev(dev); - return -ENODEV; +err_out_free_res: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); + goto out; } /* Serial EEPROM section. */ @@ -589,6 +611,38 @@ #define EE_READ256_CMD (6 << 8) #define EE_ERASE_CMD (7 << 6) +static void epic_disable_int(struct net_device *dev, struct epic_private *ep) +{ + long ioaddr = dev->base_addr; + + outl(0x00000000, ioaddr + INTMASK); +} + +static inline void __epic_pci_commit(long ioaddr) +{ +#ifndef USE_IO_OPS + inl(ioaddr + INTMASK); +#endif +} + +static inline void epic_napi_irq_off(struct net_device *dev, + struct epic_private *ep) +{ + long ioaddr = dev->base_addr; + + outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK); + __epic_pci_commit(ioaddr); +} + +static inline void epic_napi_irq_on(struct net_device *dev, + struct epic_private *ep) +{ + long ioaddr = dev->base_addr; + + /* No need to commit possible posted write */ + outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK); +} + static int __devinit read_eeprom(long ioaddr, int location) { int i; @@ -749,9 +803,8 @@ /* Enable interrupts by setting the interrupt mask. */ outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) - | CntFull | TxUnderrun | TxDone | TxEmpty - | RxError | RxOverflow | RxFull | RxHeader | RxDone, - ioaddr + INTMASK); + | CntFull | TxUnderrun + | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); if (debug > 1) printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x " @@ -792,7 +845,7 @@ } /* Remove the packets on the Rx queue. */ - epic_rx(dev); + epic_rx(dev, RX_RING_SIZE); } static void epic_restart(struct net_device *dev) @@ -838,9 +891,9 @@ /* Enable interrupts by setting the interrupt mask. */ outl((ep->chip_flags & TYPE2_INTR ? 
PCIBusErr175 : PCIBusErr170) - | CntFull | TxUnderrun | TxDone | TxEmpty - | RxError | RxOverflow | RxFull | RxHeader | RxDone, - ioaddr + INTMASK); + | CntFull | TxUnderrun + | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); + printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x" " interrupt %4.4x.\n", dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL), @@ -926,7 +979,6 @@ int i; ep->tx_full = 0; - ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED; ep->dirty_tx = ep->cur_tx = 0; ep->cur_rx = ep->dirty_rx = 0; ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); @@ -1026,6 +1078,76 @@ return 0; } +static void epic_tx_error(struct net_device *dev, struct epic_private *ep, + int status) +{ + struct net_device_stats *stats = &ep->stats; + +#ifndef final_version + /* There was an major error, log it. */ + if (debug > 1) + printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", + dev->name, status); +#endif + stats->tx_errors++; + if (status & 0x1050) + stats->tx_aborted_errors++; + if (status & 0x0008) + stats->tx_carrier_errors++; + if (status & 0x0040) + stats->tx_window_errors++; + if (status & 0x0010) + stats->tx_fifo_errors++; +} + +static void epic_tx(struct net_device *dev, struct epic_private *ep) +{ + unsigned int dirty_tx, cur_tx; + + /* + * Note: if this lock becomes a problem we can narrow the locked + * region at the cost of occasionally grabbing the lock more times. + */ + cur_tx = ep->cur_tx; + for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) { + struct sk_buff *skb; + int entry = dirty_tx % TX_RING_SIZE; + int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus); + + if (txstatus & DescOwn) + break; /* It still hasn't been Txed */ + + if (likely(txstatus & 0x0001)) { + ep->stats.collisions += (txstatus >> 8) & 15; + ep->stats.tx_packets++; + ep->stats.tx_bytes += ep->tx_skbuff[entry]->len; + } else + epic_tx_error(dev, ep, txstatus); + + /* Free the original skb. */ + skb = ep->tx_skbuff[entry]; + pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb_irq(skb); + ep->tx_skbuff[entry] = 0; + } + +#ifndef final_version + if (cur_tx - dirty_tx > TX_RING_SIZE) { + printk(KERN_WARNING + "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n", + dev->name, dirty_tx, cur_tx, ep->tx_full); + dirty_tx += TX_RING_SIZE; + } +#endif + ep->dirty_tx = dirty_tx; + if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) { + /* The ring is no longer full, allow new TX entries. */ + ep->tx_full = 0; + netif_wake_queue(dev); + } +} + /* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */ static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs) @@ -1033,135 +1155,71 @@ struct net_device *dev = dev_instance; struct epic_private *ep = dev->priv; long ioaddr = dev->base_addr; - int status, boguscnt = max_interrupt_work; unsigned int handled = 0; + int status; - do { - status = inl(ioaddr + INTSTAT); - /* Acknowledge all of the current interrupt sources ASAP. */ - outl(status & 0x00007fff, ioaddr + INTSTAT); - - if (debug > 4) - printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new " - "intstat=%#8.8x.\n", - dev->name, status, (int)inl(ioaddr + INTSTAT)); - - if ((status & IntrSummary) == 0) - break; - handled = 1; + status = inl(ioaddr + INTSTAT); + /* Acknowledge all of the current interrupt sources ASAP. 
*/ + outl(status & EpicNormalEvent, ioaddr + INTSTAT); - if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow)) - epic_rx(dev); + if (debug > 4) { + printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new " + "intstat=%#8.8x.\n", dev->name, status, + (int)inl(ioaddr + INTSTAT)); + } - if (status & (TxEmpty | TxDone)) { - unsigned int dirty_tx, cur_tx; + if ((status & IntrSummary) == 0) + goto out; - /* Note: if this lock becomes a problem we can narrow the locked - region at the cost of occasionally grabbing the lock more - times. */ - spin_lock(&ep->lock); - cur_tx = ep->cur_tx; - dirty_tx = ep->dirty_tx; - for (; cur_tx - dirty_tx > 0; dirty_tx++) { - struct sk_buff *skb; - int entry = dirty_tx % TX_RING_SIZE; - int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus); + handled = 1; - if (txstatus & DescOwn) - break; /* It still hasn't been Txed */ + if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { + spin_lock(&ep->napi_lock); + if (netif_rx_schedule_prep(dev)) { + epic_napi_irq_off(dev, ep); + __netif_rx_schedule(dev); + } else + ep->reschedule_in_poll++; + spin_unlock(&ep->napi_lock); + } + status &= ~EpicNapiEvent; - if ( ! (txstatus & 0x0001)) { - /* There was an major error, log it. */ -#ifndef final_version - if (debug > 1) - printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", - dev->name, txstatus); -#endif - ep->stats.tx_errors++; - if (txstatus & 0x1050) ep->stats.tx_aborted_errors++; - if (txstatus & 0x0008) ep->stats.tx_carrier_errors++; - if (txstatus & 0x0040) ep->stats.tx_window_errors++; - if (txstatus & 0x0010) ep->stats.tx_fifo_errors++; - } else { - ep->stats.collisions += (txstatus >> 8) & 15; - ep->stats.tx_packets++; - ep->stats.tx_bytes += ep->tx_skbuff[entry]->len; - } - - /* Free the original skb. */ - skb = ep->tx_skbuff[entry]; - pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, - skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); - ep->tx_skbuff[entry] = 0; - } + /* Check uncommon events all at once. */ + if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) { + if (status == EpicRemoved) + goto out; -#ifndef final_version - if (cur_tx - dirty_tx > TX_RING_SIZE) { - printk(KERN_WARNING "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n", - dev->name, dirty_tx, cur_tx, ep->tx_full); - dirty_tx += TX_RING_SIZE; - } -#endif - ep->dirty_tx = dirty_tx; - if (ep->tx_full - && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) { - /* The ring is no longer full, allow new TX entries. */ - ep->tx_full = 0; - spin_unlock(&ep->lock); - netif_wake_queue(dev); - } else - spin_unlock(&ep->lock); - } + /* Always update the error counts to avoid overhead later. */ + ep->stats.rx_missed_errors += inb(ioaddr + MPCNT); + ep->stats.rx_frame_errors += inb(ioaddr + ALICNT); + ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT); - /* Check uncommon events all at once. */ - if (status & (CntFull | TxUnderrun | RxOverflow | RxFull | - PCIBusErr170 | PCIBusErr175)) { - if (status == 0xffffffff) /* Chip failed or removed (CardBus). */ - break; - /* Always update the error counts to avoid overhead later. */ - ep->stats.rx_missed_errors += inb(ioaddr + MPCNT); - ep->stats.rx_frame_errors += inb(ioaddr + ALICNT); - ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT); - - if (status & TxUnderrun) { /* Tx FIFO underflow. */ - ep->stats.tx_fifo_errors++; - outl(ep->tx_threshold += 128, ioaddr + TxThresh); - /* Restart the transmit process. */ - outl(RestartTx, ioaddr + COMMAND); - } - if (status & RxOverflow) { /* Missed a Rx frame. 
*/ - ep->stats.rx_errors++; - } - if (status & (RxOverflow | RxFull)) - outw(RxQueued, ioaddr + COMMAND); - if (status & PCIBusErr170) { - printk(KERN_ERR "%s: PCI Bus Error! EPIC status %4.4x.\n", - dev->name, status); - epic_pause(dev); - epic_restart(dev); - } - /* Clear all error sources. */ - outl(status & 0x7f18, ioaddr + INTSTAT); + if (status & TxUnderrun) { /* Tx FIFO underflow. */ + ep->stats.tx_fifo_errors++; + outl(ep->tx_threshold += 128, ioaddr + TxThresh); + /* Restart the transmit process. */ + outl(RestartTx, ioaddr + COMMAND); } - if (--boguscnt < 0) { - printk(KERN_ERR "%s: Too much work at interrupt, " - "IntrStatus=0x%8.8x.\n", - dev->name, status); - /* Clear all interrupt sources. */ - outl(0x0001ffff, ioaddr + INTSTAT); - break; + if (status & PCIBusErr170) { + printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n", + dev->name, status); + epic_pause(dev); + epic_restart(dev); } - } while (1); + /* Clear all error sources. */ + outl(status & 0x7f18, ioaddr + INTSTAT); + } - if (debug > 3) - printk(KERN_DEBUG "%s: exiting interrupt, intr_status=%#4.4x.\n", - dev->name, status); +out: + if (debug > 3) { + printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n", + dev->name, status); + } return IRQ_RETVAL(handled); } -static int epic_rx(struct net_device *dev) +static int epic_rx(struct net_device *dev, int budget) { struct epic_private *ep = dev->priv; int entry = ep->cur_rx % RX_RING_SIZE; @@ -1171,6 +1229,10 @@ if (debug > 4) printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry, ep->rx_ring[entry].rxstatus); + + if (rx_work_limit > budget) + rx_work_limit = budget; + /* If we own the next entry, it's a new packet. Send it up. */ while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) { int status = le32_to_cpu(ep->rx_ring[entry].rxstatus); @@ -1226,7 +1288,7 @@ ep->rx_skbuff[entry] = NULL; } skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); + netif_receive_skb(skb); dev->last_rx = jiffies; ep->stats.rx_packets++; ep->stats.rx_bytes += pkt_len; @@ -1254,6 +1316,65 @@ return work_done; } +static void epic_rx_err(struct net_device *dev, struct epic_private *ep) +{ + long ioaddr = dev->base_addr; + int status; + + status = inl(ioaddr + INTSTAT); + + if (status == EpicRemoved) + return; + if (status & RxOverflow) /* Missed a Rx frame. */ + ep->stats.rx_errors++; + if (status & (RxOverflow | RxFull)) + outw(RxQueued, ioaddr + COMMAND); +} + +static int epic_poll(struct net_device *dev, int *budget) +{ + struct epic_private *ep = dev->priv; + int work_done, orig_budget; + long ioaddr = dev->base_addr; + + orig_budget = (*budget > dev->quota) ? 
dev->quota : *budget; + +rx_action: + + epic_tx(dev, ep); + + work_done = epic_rx(dev, *budget); + + epic_rx_err(dev, ep); + + *budget -= work_done; + dev->quota -= work_done; + + if (netif_running(dev) && (work_done < orig_budget)) { + unsigned long flags; + int more; + + /* A bit baroque but it avoids a (space hungry) spin_unlock */ + + spin_lock_irqsave(&ep->napi_lock, flags); + + more = ep->reschedule_in_poll; + if (!more) { + __netif_rx_complete(dev); + outl(EpicNapiEvent, ioaddr + INTSTAT); + epic_napi_irq_on(dev, ep); + } else + ep->reschedule_in_poll--; + + spin_unlock_irqrestore(&ep->napi_lock, flags); + + if (more) + goto rx_action; + } + + return (work_done >= orig_budget); +} + static int epic_close(struct net_device *dev) { long ioaddr = dev->base_addr; @@ -1268,9 +1389,13 @@ dev->name, (int)inl(ioaddr + INTSTAT)); del_timer_sync(&ep->timer); - epic_pause(dev); + + epic_disable_int(dev, ep); + free_irq(dev->irq, dev); + epic_pause(dev); + /* Free all the skbuffs in the Rx queue. */ for (i = 0; i < RX_RING_SIZE; i++) { skb = ep->rx_skbuff[i]; @@ -1491,6 +1616,7 @@ #endif pci_release_regions(pdev); free_netdev(dev); + pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); /* pci_power_off(pdev, -1); */ } diff -Nru a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c --- a/drivers/net/forcedeth.c 2004-07-13 12:46:19 -07:00 +++ b/drivers/net/forcedeth.c 2004-07-13 12:46:19 -07:00 @@ -10,8 +10,11 @@ * trademarks of NVIDIA Corporation in the United States and other * countries. * - * Copyright (C) 2003 Manfred Spraul + * Copyright (C) 2003,4 Manfred Spraul * Copyright (C) 2004 Andrew de Quincey (wol support) + * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane + * IRQ rate fixes, bigendian fixes, cleanups, verification) + * Copyright (c) 2004 NVIDIA Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -60,15 +63,18 @@ * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac * addresses, really stop rx if already running * in nv_start_rx, clean up a bit. - * (C) Carl-Daniel Hailfinger * 0.20: 07 Dec 2003: alloc fixes * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix. * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup - * on close. - * (C) Carl-Daniel Hailfinger, Manfred Spraul + * on close. * 0.23: 26 Jan 2004: various small cleanups * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces * 0.25: 09 Mar 2004: wol support + * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes + * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings, + * added CK804/MCP04 device IDs, code fixes + * for registers, link status and other minor fixes. + * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe * * Known bugs: * We suspect that on some hardware no TX done interrupts are generated. @@ -80,7 +86,7 @@ * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few * superfluous timer interrupts from the nic. 
*/ -#define FORCEDETH_VERSION "0.25" +#define FORCEDETH_VERSION "0.28" #define DRV_NAME "forcedeth" #include @@ -124,6 +130,7 @@ #define NVREG_IRQSTAT_MIIEVENT 0x040 #define NVREG_IRQSTAT_MASK 0x1ff NvRegIrqMask = 0x004, +#define NVREG_IRQ_RX_ERROR 0x0001 #define NVREG_IRQ_RX 0x0002 #define NVREG_IRQ_RX_NOBUF 0x0004 #define NVREG_IRQ_TX_ERR 0x0008 @@ -133,7 +140,7 @@ #define NVREG_IRQ_TX1 0x0100 #define NVREG_IRQMASK_WANTED_1 0x005f #define NVREG_IRQMASK_WANTED_2 0x0147 -#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR|NVREG_IRQ_TX2|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX1)) +#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR|NVREG_IRQ_TX2|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX1)) NvRegUnknownSetupReg6 = 0x008, #define NVREG_UNKSETUP6_VAL 3 @@ -160,7 +167,7 @@ NvRegOffloadConfig = 0x90, #define NVREG_OFFLOAD_HOMEPHY 0x601 -#define NVREG_OFFLOAD_NORMAL 0x5ee +#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE NvRegReceiverControl = 0x094, #define NVREG_RCVCTL_START 0x01 NvRegReceiverStatus = 0x98, @@ -169,6 +176,8 @@ NvRegRandomSeed = 0x9c, #define NVREG_RNDSEED_MASK 0x00ff #define NVREG_RNDSEED_FORCE 0x7f00 +#define NVREG_RNDSEED_FORCE2 0x2d00 +#define NVREG_RNDSEED_FORCE3 0x7400 NvRegUnknownSetupReg1 = 0xA0, #define NVREG_UNKSETUP1_VAL 0x16070f @@ -182,6 +191,9 @@ NvRegMulticastMaskA = 0xB8, NvRegMulticastMaskB = 0xBC, + NvRegPhyInterface = 0xC0, +#define PHY_RGMII 0x10000000 + NvRegTxRingPhysAddr = 0x100, NvRegRxRingPhysAddr = 0x104, NvRegRingSizes = 0x108, @@ -190,12 +202,12 @@ NvRegUnknownTransmitterReg = 0x10c, NvRegLinkSpeed = 0x110, #define NVREG_LINKSPEED_FORCE 0x10000 -#define NVREG_LINKSPEED_10 10 +#define NVREG_LINKSPEED_10 1000 #define NVREG_LINKSPEED_100 100 -#define NVREG_LINKSPEED_1000 1000 +#define NVREG_LINKSPEED_1000 50 NvRegUnknownSetupReg5 = 0x130, #define NVREG_UNKSETUP5_BIT31 (1<<31) - NvRegUnknownSetupReg3 = 0x134, + NvRegUnknownSetupReg3 = 0x13c, #define NVREG_UNKSETUP3_VAL1 0x200010 NvRegTxRxControl = 0x144, #define NVREG_TXRXCTL_KICK 0x0001 @@ -214,15 +226,15 @@ NvRegAdapterControl = 0x188, #define NVREG_ADAPTCTL_START 0x02 #define NVREG_ADAPTCTL_LINKUP 0x04 -#define NVREG_ADAPTCTL_PHYVALID 0x4000 +#define NVREG_ADAPTCTL_PHYVALID 0x40000 #define NVREG_ADAPTCTL_RUNNING 0x100000 #define NVREG_ADAPTCTL_PHYSHIFT 24 NvRegMIISpeed = 0x18c, #define NVREG_MIISPEED_BIT8 (1<<8) #define NVREG_MIIDELAY 5 NvRegMIIControl = 0x190, -#define NVREG_MIICTL_INUSE 0x10000 -#define NVREG_MIICTL_WRITE 0x08000 +#define NVREG_MIICTL_INUSE 0x08000 +#define NVREG_MIICTL_WRITE 0x00400 #define NVREG_MIICTL_ADDRSHIFT 5 NvRegMIIData = 0x194, NvRegWakeUpFlags = 0x200, @@ -254,34 +266,63 @@ #define NVREG_POWERSTATE_D3 0x0003 }; +/* Big endian: should work, but is untested */ struct ring_desc { u32 PacketBuffer; - u16 Length; - u16 Flags; + u32 FlagLen; }; -#define NV_TX_LASTPACKET (1<<0) -#define NV_TX_RETRYERROR (1<<3) -#define NV_TX_LASTPACKET1 (1<<8) -#define NV_TX_DEFERRED (1<<10) -#define NV_TX_CARRIERLOST (1<<11) -#define NV_TX_LATECOLLISION (1<<12) -#define NV_TX_UNDERFLOW (1<<13) -#define NV_TX_ERROR (1<<14) -#define NV_TX_VALID (1<<15) - -#define NV_RX_DESCRIPTORVALID (1<<0) -#define NV_RX_MISSEDFRAME (1<<1) -#define NV_RX_SUBSTRACT1 (1<<3) -#define NV_RX_ERROR1 (1<<7) -#define NV_RX_ERROR2 (1<<8) -#define NV_RX_ERROR3 (1<<9) -#define NV_RX_ERROR4 (1<<10) -#define NV_RX_CRCERR (1<<11) -#define NV_RX_OVERFLOW (1<<12) -#define NV_RX_FRAMINGERR (1<<13) -#define NV_RX_ERROR (1<<14) -#define NV_RX_AVAIL (1<<15) 
+#define FLAG_MASK_V1 0xffff0000 +#define FLAG_MASK_V2 0xffffc000 +#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1) +#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2) + +#define NV_TX_LASTPACKET (1<<16) +#define NV_TX_RETRYERROR (1<<19) +#define NV_TX_LASTPACKET1 (1<<24) +#define NV_TX_DEFERRED (1<<26) +#define NV_TX_CARRIERLOST (1<<27) +#define NV_TX_LATECOLLISION (1<<28) +#define NV_TX_UNDERFLOW (1<<29) +#define NV_TX_ERROR (1<<30) +#define NV_TX_VALID (1<<31) + +#define NV_TX2_LASTPACKET (1<<29) +#define NV_TX2_RETRYERROR (1<<18) +#define NV_TX2_LASTPACKET1 (1<<23) +#define NV_TX2_DEFERRED (1<<25) +#define NV_TX2_CARRIERLOST (1<<26) +#define NV_TX2_LATECOLLISION (1<<27) +#define NV_TX2_UNDERFLOW (1<<28) +/* error and valid are the same for both */ +#define NV_TX2_ERROR (1<<30) +#define NV_TX2_VALID (1<<31) + +#define NV_RX_DESCRIPTORVALID (1<<16) +#define NV_RX_MISSEDFRAME (1<<17) +#define NV_RX_SUBSTRACT1 (1<<18) +#define NV_RX_ERROR1 (1<<23) +#define NV_RX_ERROR2 (1<<24) +#define NV_RX_ERROR3 (1<<25) +#define NV_RX_ERROR4 (1<<26) +#define NV_RX_CRCERR (1<<27) +#define NV_RX_OVERFLOW (1<<28) +#define NV_RX_FRAMINGERR (1<<29) +#define NV_RX_ERROR (1<<30) +#define NV_RX_AVAIL (1<<31) + +#define NV_RX2_DESCRIPTORVALID (1<<29) +#define NV_RX2_SUBSTRACT1 (1<<25) +#define NV_RX2_ERROR1 (1<<18) +#define NV_RX2_ERROR2 (1<<19) +#define NV_RX2_ERROR3 (1<<20) +#define NV_RX2_ERROR4 (1<<21) +#define NV_RX2_CRCERR (1<<22) +#define NV_RX2_OVERFLOW (1<<23) +#define NV_RX2_FRAMINGERR (1<<24) +/* error and avail are the same for both */ +#define NV_RX2_ERROR (1<<30) +#define NV_RX2_AVAIL (1<<31) /* Miscellaneous hardware related defines: */ #define NV_PCI_REGSZ 0x270 @@ -307,28 +348,66 @@ /* General driver defaults */ #define NV_WATCHDOG_TIMEO (5*HZ) -#define DEFAULT_MTU 1500 /* also maximum supported, at least for now */ #define RX_RING 128 -#define TX_RING 16 -/* limited to 1 packet until we understand NV_TX_LASTPACKET */ -#define TX_LIMIT_STOP 10 -#define TX_LIMIT_START 5 +#define TX_RING 64 +/* + * If your nic mysteriously hangs then try to reduce the limits + * to 1/0: It might be required to set NV_TX_LASTPACKET in the + * last valid ring entry. But this would be impossible to + * implement - probably a disassembly error.
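+ *
+ * (Illustrative note, added editorially: the two limits form a small
+ * hysteresis -- nv_start_xmit() stops the queue once next_tx - nic_tx
+ * reaches TX_LIMIT_STOP, and the completion path may wake it again
+ * once the backlog drops below TX_LIMIT_START.)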
+ */ +#define TX_LIMIT_STOP 63 +#define TX_LIMIT_START 62 /* rx/tx mac addr + type + vlan + align + slack*/ -#define RX_NIC_BUFSIZE (DEFAULT_MTU + 64) +#define RX_NIC_BUFSIZE (ETH_DATA_LEN + 64) /* even more slack */ -#define RX_ALLOC_BUFSIZE (DEFAULT_MTU + 128) +#define RX_ALLOC_BUFSIZE (ETH_DATA_LEN + 128) #define OOM_REFILL (1+HZ/20) #define POLL_WAIT (1+HZ/100) +#define DESC_VER_1 0x0 +#define DESC_VER_2 0x02100 + +/* PHY defines */ +#define PHY_OUI_MARVELL 0x5043 +#define PHY_OUI_CICADA 0x03f1 +#define PHYID1_OUI_MASK 0x03ff +#define PHYID1_OUI_SHFT 6 +#define PHYID2_OUI_MASK 0xfc00 +#define PHYID2_OUI_SHFT 10 +#define PHY_INIT1 0x0f000 +#define PHY_INIT2 0x0e00 +#define PHY_INIT3 0x01000 +#define PHY_INIT4 0x0200 +#define PHY_INIT5 0x0004 +#define PHY_INIT6 0x02000 +#define PHY_GIGABIT 0x0100 + +#define PHY_TIMEOUT 0x1 +#define PHY_ERROR 0x2 + +#define PHY_100 0x1 +#define PHY_1000 0x2 +#define PHY_HALF 0x100 + +/* FIXME: MII defines that should be added to <linux/mii.h> */ +#define MII_1000BT_CR 0x09 +#define MII_1000BT_SR 0x0a +#define ADVERTISE_1000FULL 0x0200 +#define ADVERTISE_1000HALF 0x0100 +#define LPA_1000FULL 0x0800 +#define LPA_1000HALF 0x0400 + + /* * SMP locking: * All hardware access under dev->priv->lock, except the performance * critical parts: * - rx is (pseudo-) lockless: it relies on the single-threading provided - * by the arch code for interrupts. + * by the arch code for interrupts. * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission * needs dev->priv->lock :-( * - set_multicast_list: preparation lockless, relies on dev->xmit_lock. @@ -346,12 +425,15 @@ int duplex; int phyaddr; int wolenabled; + unsigned int phy_oui; + u16 gigabit; /* General data: RO fields */ dma_addr_t ring_addr; struct pci_dev *pci_dev; u32 orig_mac[2]; u32 irqmask; + u32 desc_ver; /* rx specific fields. * Locking: Within irq handler or disable_irq+spin_lock(&np->lock); @@ -371,7 +453,7 @@ unsigned int next_tx, nic_tx; struct sk_buff *tx_skbuff[TX_RING]; dma_addr_t tx_dma[TX_RING]; - u16 tx_flags; + u32 tx_flags; }; /* @@ -396,6 +478,12 @@ readl(base); } +static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v) +{ + return le32_to_cpu(prd->FlagLen) + & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2); +} + static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, int delay, int delaymax, const char *msg) { @@ -422,24 +510,18 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value) { u8 *base = get_hwbase(dev); - int was_running; u32 reg; int retval; writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); - was_running = 0; - reg = readl(base + NvRegAdapterControl); - if (reg & NVREG_ADAPTCTL_RUNNING) { - was_running = 1; - writel(reg & ~NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); - } + reg = readl(base + NvRegMIIControl); if (reg & NVREG_MIICTL_INUSE) { writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl); udelay(NV_MIIBUSY_DELAY); } - reg = NVREG_MIICTL_INUSE | (addr << NVREG_MIICTL_ADDRSHIFT) | miireg; + reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg; if (value != MII_READ) { writel(value, base + NvRegMIIData); reg |= NVREG_MIICTL_WRITE; @@ -461,19 +543,117 @@ dev->name, miireg, addr); retval = -1; } else { - /* FIXME: why is that required?
*/ - udelay(50); retval = readl(base + NvRegMIIData); dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n", dev->name, miireg, addr, retval); } - if (was_running) { - reg = readl(base + NvRegAdapterControl); - writel(reg | NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); - } + return retval; } +static int phy_reset(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + u32 miicontrol; + unsigned int tries = 0; + + miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + miicontrol |= BMCR_RESET; + if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) { + return -1; + } + + /* wait for 500ms */ + msleep(500); + + /* must wait till reset is deasserted */ + while (miicontrol & BMCR_RESET) { + msleep(10); + miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + /* FIXME: 100 tries seem excessive */ + if (tries++ > 100) + return -1; + } + return 0; +} + +static int phy_init(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + u8 *base = get_hwbase(dev); + u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg; + + /* set advertise register */ + reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); + reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400); + if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { + printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + + /* get phy interface type */ + phyinterface = readl(base + NvRegPhyInterface); + + /* see if gigabit phy */ + mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + if (mii_status & PHY_GIGABIT) { + np->gigabit = PHY_GIGABIT; + mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); + mii_control_1000 &= ~ADVERTISE_1000HALF; + if (phyinterface & PHY_RGMII) + mii_control_1000 |= ADVERTISE_1000FULL; + else + mii_control_1000 &= ~ADVERTISE_1000FULL; + + if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + } + else + np->gigabit = 0; + + /* reset the phy */ + if (phy_reset(dev)) { + printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + + /* phy vendor specific configuration */ + if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { + phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); + phy_reserved &= ~(PHY_INIT1 | PHY_INIT2); + phy_reserved |= (PHY_INIT3 | PHY_INIT4); + if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); + phy_reserved |= PHY_INIT5; + if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + } + if (np->phy_oui == PHY_OUI_CICADA) { + phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); + phy_reserved |= PHY_INIT6; + if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + } + + /* restart auto negotiation */ + mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); + if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { + return PHY_ERROR; + } + + return 0; +} + static void nv_start_rx(struct net_device *dev) { struct fe_priv *np = 
get_nvpriv(dev); @@ -488,6 +668,8 @@ writel(np->linkspeed, base + NvRegLinkSpeed); pci_push(base); writel(NVREG_RCVCTL_START, base + NvRegReceiverControl); + dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", + dev->name, np->duplex, np->linkspeed); pci_push(base); } @@ -498,8 +680,8 @@ dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name); writel(0, base + NvRegReceiverControl); reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, - NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, - KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); + NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, + KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); udelay(NV_RXSTOP_DELAY2); writel(0, base + NvRegLinkSpeed); @@ -521,8 +703,8 @@ dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name); writel(0, base + NvRegTransmitterControl); reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, - NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, - KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); + NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, + KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); udelay(NV_TXSTOP_DELAY2); writel(0, base + NvRegUnknownTransmitterReg); @@ -530,13 +712,14 @@ static void nv_txrx_reset(struct net_device *dev) { + struct fe_priv *np = get_nvpriv(dev); u8 *base = get_hwbase(dev); dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name); - writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET, base + NvRegTxRxControl); + writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->desc_ver, base + NvRegTxRxControl); pci_push(base); udelay(NV_TXRX_RESET_DELAY); - writel(NVREG_TXRXCTL_BIT2, base + NvRegTxRxControl); + writel(NVREG_TXRXCTL_BIT2 | np->desc_ver, base + NvRegTxRxControl); pci_push(base); } @@ -651,11 +834,12 @@ { struct fe_priv *np = get_nvpriv(dev); unsigned int refill_rx = np->refill_rx; + int nr; while (np->cur_rx != refill_rx) { - int nr = refill_rx % RX_RING; struct sk_buff *skb; + nr = refill_rx % RX_RING; if (np->rx_skbuff[nr] == NULL) { skb = dev_alloc_skb(RX_ALLOC_BUFSIZE); @@ -670,10 +854,9 @@ np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len, PCI_DMA_FROMDEVICE); np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]); - np->rx_ring[nr].Length = cpu_to_le16(RX_NIC_BUFSIZE); wmb(); - np->rx_ring[nr].Flags = cpu_to_le16(NV_RX_AVAIL); - dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n", + np->rx_ring[nr].FlagLen = cpu_to_le32(RX_NIC_BUFSIZE | NV_RX_AVAIL); + dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n", dev->name, refill_rx); refill_rx++; } @@ -704,15 +887,13 @@ int i; np->next_tx = np->nic_tx = 0; - for (i = 0; i < TX_RING; i++) { - np->tx_ring[i].Flags = 0; - } + for (i = 0; i < TX_RING; i++) + np->tx_ring[i].FlagLen = 0; np->cur_rx = RX_RING; np->refill_rx = 0; - for (i = 0; i < RX_RING; i++) { - np->rx_ring[i].Flags = 0; - } + for (i = 0; i < RX_RING; i++) + np->rx_ring[i].FlagLen = 0; return nv_alloc_rx(dev); } @@ -721,7 +902,7 @@ struct fe_priv *np = get_nvpriv(dev); int i; for (i = 0; i < TX_RING; i++) { - np->tx_ring[i].Flags = 0; + np->tx_ring[i].FlagLen = 0; if (np->tx_skbuff[i]) { pci_unmap_single(np->pci_dev, np->tx_dma[i], np->tx_skbuff[i]->len, @@ -738,7 +919,7 @@ struct fe_priv *np = get_nvpriv(dev); int i; for (i = 0; i < RX_RING; i++) { - np->rx_ring[i].Flags = 0; + np->rx_ring[i].FlagLen = 0; wmb(); if (np->rx_skbuff[i]) { pci_unmap_single(np->pci_dev, np->rx_dma[i], @@ -770,11 +951,10 @@ PCI_DMA_TODEVICE); np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); - np->tx_ring[nr].Length = 
cpu_to_le16(skb->len-1); spin_lock_irq(&np->lock); wmb(); - np->tx_ring[nr].Flags = np->tx_flags; + np->tx_ring[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags ); dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d queued for transmission.\n", dev->name, np->next_tx); { @@ -793,7 +973,7 @@ if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP) netif_stop_queue(dev); spin_unlock_irq(&np->lock); - writel(NVREG_TXRXCTL_KICK, get_hwbase(dev) + NvRegTxRxControl); + writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl); pci_push(get_hwbase(dev)); return 0; } @@ -806,27 +986,42 @@ static void nv_tx_done(struct net_device *dev) { struct fe_priv *np = get_nvpriv(dev); + u32 Flags; + int i; - while (np->nic_tx < np->next_tx) { - struct ring_desc *prd; - int i = np->nic_tx % TX_RING; + while (np->nic_tx != np->next_tx) { + i = np->nic_tx % TX_RING; - prd = &np->tx_ring[i]; + Flags = le32_to_cpu(np->tx_ring[i].FlagLen); dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n", - dev->name, np->nic_tx, prd->Flags); - if (prd->Flags & cpu_to_le16(NV_TX_VALID)) + dev->name, np->nic_tx, Flags); + if (Flags & NV_TX_VALID) break; - if (prd->Flags & cpu_to_le16(NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| - NV_TX_UNDERFLOW|NV_TX_ERROR)) { - if (prd->Flags & cpu_to_le16(NV_TX_UNDERFLOW)) - np->stats.tx_fifo_errors++; - if (prd->Flags & cpu_to_le16(NV_TX_CARRIERLOST)) - np->stats.tx_carrier_errors++; - np->stats.tx_errors++; + if (np->desc_ver == DESC_VER_1) { + if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| + NV_TX_UNDERFLOW|NV_TX_ERROR)) { + if (Flags & NV_TX_UNDERFLOW) + np->stats.tx_fifo_errors++; + if (Flags & NV_TX_CARRIERLOST) + np->stats.tx_carrier_errors++; + np->stats.tx_errors++; + } else { + np->stats.tx_packets++; + np->stats.tx_bytes += np->tx_skbuff[i]->len; + } } else { - np->stats.tx_packets++; - np->stats.tx_bytes += np->tx_skbuff[i]->len; + if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| + NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { + if (Flags & NV_TX2_UNDERFLOW) + np->stats.tx_fifo_errors++; + if (Flags & NV_TX2_CARRIERLOST) + np->stats.tx_carrier_errors++; + np->stats.tx_errors++; + } else { + np->stats.tx_packets++; + np->stats.tx_bytes += np->tx_skbuff[i]->len; + } } pci_unmap_single(np->pci_dev, np->tx_dma[i], np->tx_skbuff[i]->len, @@ -876,9 +1071,9 @@ static void nv_rx_process(struct net_device *dev) { struct fe_priv *np = get_nvpriv(dev); + u32 Flags; for (;;) { - struct ring_desc *prd; struct sk_buff *skb; int len; int i; @@ -886,11 +1081,13 @@ break; /* we scanned the whole ring - do not continue */ i = np->cur_rx % RX_RING; - prd = &np->rx_ring[i]; + Flags = le32_to_cpu(np->rx_ring[i].FlagLen); + len = nv_descr_getlength(&np->rx_ring[i], np->desc_ver); + dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", - dev->name, np->cur_rx, prd->Flags); - if (prd->Flags & cpu_to_le16(NV_RX_AVAIL)) + dev->name, np->cur_rx, Flags); + if (Flags & NV_RX_AVAIL) break; /* still owned by hardware, */ /* @@ -904,7 +1101,7 @@ { int j; - dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",prd->Flags); + dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags); for (j=0; j<64; j++) { if ((j%16) == 0) dprintk("\n%03x:", j); @@ -913,41 +1110,69 @@ dprintk("\n"); } /* look at what we actually got: */ - if (!(prd->Flags & cpu_to_le16(NV_RX_DESCRIPTORVALID))) - goto next_pkt; - - - len = le16_to_cpu(prd->Length); + if (np->desc_ver == DESC_VER_1) { + if (!(Flags & NV_RX_DESCRIPTORVALID)) + goto
next_pkt; - if (prd->Flags & cpu_to_le16(NV_RX_MISSEDFRAME)) { - np->stats.rx_missed_errors++; - np->stats.rx_errors++; - goto next_pkt; - } - if (prd->Flags & cpu_to_le16(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4)) { - np->stats.rx_errors++; - goto next_pkt; - } - if (prd->Flags & cpu_to_le16(NV_RX_CRCERR)) { - np->stats.rx_crc_errors++; - np->stats.rx_errors++; - goto next_pkt; - } - if (prd->Flags & cpu_to_le16(NV_RX_OVERFLOW)) { - np->stats.rx_over_errors++; - np->stats.rx_errors++; - goto next_pkt; - } - if (prd->Flags & cpu_to_le16(NV_RX_ERROR)) { - /* framing errors are soft errors, the rest is fatal. */ - if (prd->Flags & cpu_to_le16(NV_RX_FRAMINGERR)) { - if (prd->Flags & cpu_to_le16(NV_RX_SUBSTRACT1)) { - len--; + if (Flags & NV_RX_MISSEDFRAME) { + np->stats.rx_missed_errors++; + np->stats.rx_errors++; + goto next_pkt; + } + if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4)) { + np->stats.rx_errors++; + goto next_pkt; + } + if (Flags & NV_RX_CRCERR) { + np->stats.rx_crc_errors++; + np->stats.rx_errors++; + goto next_pkt; + } + if (Flags & NV_RX_OVERFLOW) { + np->stats.rx_over_errors++; + np->stats.rx_errors++; + goto next_pkt; + } + if (Flags & NV_RX_ERROR) { + /* framing errors are soft errors, the rest is fatal. */ + if (Flags & NV_RX_FRAMINGERR) { + if (Flags & NV_RX_SUBSTRACT1) { + len--; + } + } else { + np->stats.rx_errors++; + goto next_pkt; } - } else { + } + } else { + if (!(Flags & NV_RX2_DESCRIPTORVALID)) + goto next_pkt; + + if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4)) { np->stats.rx_errors++; goto next_pkt; } + if (Flags & NV_RX2_CRCERR) { + np->stats.rx_crc_errors++; + np->stats.rx_errors++; + goto next_pkt; + } + if (Flags & NV_RX2_OVERFLOW) { + np->stats.rx_over_errors++; + np->stats.rx_errors++; + goto next_pkt; + } + if (Flags & NV_RX2_ERROR) { + /* framing errors are soft errors, the rest is fatal. */ + if (Flags & NV_RX2_FRAMINGERR) { + if (Flags & NV_RX2_SUBSTRACT1) { + len--; + } + } else { + np->stats.rx_errors++; + goto next_pkt; + } + } } /* got a valid packet - forward it to the network core */ skb = np->rx_skbuff[i]; @@ -972,7 +1197,7 @@ */ static int nv_change_mtu(struct net_device *dev, int new_mtu) { - if (new_mtu > DEFAULT_MTU) + if (new_mtu > ETH_DATA_LEN) return -EINVAL; dev->mtu = new_mtu; return 0; @@ -1036,6 +1261,8 @@ writel(mask[0], base + NvRegMulticastMaskA); writel(mask[1], base + NvRegMulticastMaskB); writel(pff, base + NvRegPacketFilterFlags); + dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", + dev->name); nv_start_rx(dev); spin_unlock_irq(&np->lock); } @@ -1043,16 +1270,62 @@ static int nv_update_linkspeed(struct net_device *dev) { struct fe_priv *np = get_nvpriv(dev); - int adv, lpa, newls, newdup; + u8 *base = get_hwbase(dev); + int adv, lpa; + int newls = np->linkspeed; + int newdup = np->duplex; + int mii_status; + int retval = 0; + u32 control_1000, status_1000, phyreg; + + /* BMSR_LSTATUS is latched, read it twice: + * we want the current value. + */ + mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + + if (!(mii_status & BMSR_LSTATUS)) { + dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", + dev->name); + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 0; + retval = 0; + goto set_speed; + } + + /* check auto negotiation is complete */ + if (!(mii_status & BMSR_ANEGCOMPLETE)) { + /* still in autonegotiation - configure nic for 10 MBit HD and wait. 
*/ + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 0; + retval = 0; + dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); + goto set_speed; + } + + retval = 1; + if (np->gigabit == PHY_GIGABIT) { + control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); + status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ); + + if ((control_1000 & ADVERTISE_1000FULL) && + (status_1000 & LPA_1000FULL)) { + dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", + dev->name); + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; + newdup = 1; + goto set_speed; + } + } adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", dev->name, adv, lpa); - /* FIXME: handle parallel detection properly, handle gigabit ethernet */ + /* FIXME: handle parallel detection properly */ lpa = lpa & adv; - if (lpa & LPA_100FULL) { + if (lpa & LPA_100FULL) { newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; newdup = 1; } else if (lpa & LPA_100HALF) { @@ -1069,47 +1342,75 @@ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; newdup = 0; } - if (np->duplex != newdup || np->linkspeed != newls) { - np->duplex = newdup; - np->linkspeed = newls; - return 1; - } - return 0; + +set_speed: + if (np->duplex == newdup && np->linkspeed == newls) + return retval; + + dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", + dev->name, np->linkspeed, np->duplex, newls, newdup); + + np->duplex = newdup; + np->linkspeed = newls; + + if (np->gigabit == PHY_GIGABIT) { + phyreg = readl(base + NvRegRandomSeed); + phyreg &= ~(0x3FF00); + if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) + phyreg |= NVREG_RNDSEED_FORCE3; + else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) + phyreg |= NVREG_RNDSEED_FORCE2; + else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) + phyreg |= NVREG_RNDSEED_FORCE; + writel(phyreg, base + NvRegRandomSeed); + } + + phyreg = readl(base + NvRegPhyInterface); + phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); + if (np->duplex == 0) + phyreg |= PHY_HALF; + if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) + phyreg |= PHY_100; + else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) + phyreg |= PHY_1000; + writel(phyreg, base + NvRegPhyInterface); + + writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), + base + NvRegMisc1); + pci_push(base); + writel(np->linkspeed, base + NvRegLinkSpeed); + pci_push(base); + + return retval; } static void nv_link_irq(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); u8 *base = get_hwbase(dev); u32 miistat; - int miival; miistat = readl(base + NvRegMIIStatus); writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); - printk(KERN_DEBUG "%s: link change notification, status 0x%x.\n", dev->name, miistat); + dprintk(KERN_DEBUG "%s: link change notification, status 0x%x.\n", dev->name, miistat); - miival = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); - if (miival & BMSR_ANEGCOMPLETE) { - nv_update_linkspeed(dev); - - if (netif_carrier_ok(dev)) { - nv_stop_rx(dev); + if (miistat & (NVREG_MIISTAT_LINKCHANGE)) { + if (nv_update_linkspeed(dev)) { + if (netif_carrier_ok(dev)) { + nv_stop_rx(dev); + } else { + netif_carrier_on(dev); + printk(KERN_INFO "%s: link up.\n", dev->name); + } + nv_start_rx(dev); } else { - netif_carrier_on(dev); - printk(KERN_INFO "%s: link up.\n", dev->name); - } - writel(NVREG_MISC1_FORCE | ( np->duplex ? 
0 : NVREG_MISC1_HD), - base + NvRegMisc1); - nv_start_rx(dev); - } else { - if (netif_carrier_ok(dev)) { - netif_carrier_off(dev); - printk(KERN_INFO "%s: link down.\n", dev->name); - nv_stop_rx(dev); + if (netif_carrier_ok(dev)) { + netif_carrier_off(dev); + printk(KERN_INFO "%s: link down.\n", dev->name); + nv_stop_rx(dev); + } } - writel(np->linkspeed, base + NvRegLinkSpeed); - pci_push(base); } + dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); } static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) @@ -1136,7 +1437,7 @@ spin_unlock(&np->lock); } - if (events & (NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF)) { + if (events & (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF)) { nv_rx_process(dev); if (nv_alloc_rx(dev)) { spin_lock(&np->lock); @@ -1158,7 +1459,7 @@ if (events & (NVREG_IRQ_UNKNOWN)) { printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", dev->name, events); - } + } if (i > max_interrupt_work) { spin_lock(&np->lock); /* disable interrupts on the nic */ @@ -1211,21 +1512,27 @@ writel(0, base + NvRegMulticastMaskA); writel(0, base + NvRegMulticastMaskB); writel(0, base + NvRegPacketFilterFlags); + + writel(0, base + NvRegTransmitterControl); + writel(0, base + NvRegReceiverControl); + writel(0, base + NvRegAdapterControl); + + /* 2) initialize descriptor rings */ + oom = nv_init_ring(dev); + writel(0, base + NvRegLinkSpeed); writel(0, base + NvRegUnknownTransmitterReg); nv_txrx_reset(dev); writel(0, base + NvRegUnknownSetupReg6); - /* 2) initialize descriptor rings */ np->in_shutdown = 0; - oom = nv_init_ring(dev); /* 3) set mac address */ { u32 mac[2]; - mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + + mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); @@ -1233,53 +1540,31 @@ writel(mac[1], base + NvRegMacAddrB); } - /* 4) continue setup */ + /* 4) give hw rings */ + writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr); + writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); + writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), + base + NvRegRingSizes); + + /* 5) continue setup */ np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; np->duplex = 0; + + writel(np->linkspeed, base + NvRegLinkSpeed); writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3); - writel(0, base + NvRegTxRxControl); + writel(np->desc_ver, base + NvRegTxRxControl); pci_push(base); - writel(NVREG_TXRXCTL_BIT1, base + NvRegTxRxControl); + writel(NVREG_TXRXCTL_BIT1|np->desc_ver, base + NvRegTxRxControl); reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); - writel(0, base + NvRegUnknownSetupReg4); - - /* 5) Find a suitable PHY */ - writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); - for (i = 1; i < 32; i++) { - int id1, id2; - - spin_lock_irq(&np->lock); - id1 = mii_rw(dev, i, MII_PHYSID1, MII_READ); - spin_unlock_irq(&np->lock); - if (id1 < 0 || id1 == 0xffff) - continue; - spin_lock_irq(&np->lock); - id2 = mii_rw(dev, i, MII_PHYSID2, MII_READ); - spin_unlock_irq(&np->lock); - if (id2 < 0 || id2 == 0xffff) - continue; - dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", - dev->name, id1, id2, i); - np->phyaddr = i; - spin_lock_irq(&np->lock); - nv_update_linkspeed(dev); - 
spin_unlock_irq(&np->lock); - - break; - } - if (i == 32) { - printk(KERN_INFO "%s: open: failing due to lack of suitable PHY.\n", - dev->name); - ret = -EINVAL; - goto out_drain; - } + writel(0, base + NvRegUnknownSetupReg4); + writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); + writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); /* 6) continue setup */ - writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), - base + NvRegMisc1); + writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig); @@ -1291,17 +1576,12 @@ writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2); writel(NVREG_POLL_DEFAULT, base + NvRegPollingInterval); writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); - writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID, + writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); + writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4); writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags); - /* 7) start packet processing */ - writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr); - writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); - writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), - base + NvRegRingSizes); - i = readl(base + NvRegPowerState); if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); @@ -1309,13 +1589,9 @@ pci_push(base); udelay(10); writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); - writel(NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); - writel(0, base + NvRegIrqMask); pci_push(base); - writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); - pci_push(base); writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); pci_push(base); @@ -1324,6 +1600,7 @@ if (ret) goto out_drain; + /* ask for interrupts */ writel(np->irqmask, base + NvRegIrqMask); spin_lock_irq(&np->lock); @@ -1332,18 +1609,27 @@ writel(0, base + NvRegMulticastMaskA); writel(0, base + NvRegMulticastMaskB); writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); + /* One manual link speed update: Interrupts are enabled, future link + * speed changes cause interrupts and are handled by nv_link_irq(). 
+ */ + { + u32 miistat; + miistat = readl(base + NvRegMIIStatus); + writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); + dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); + } + ret = nv_update_linkspeed(dev); nv_start_rx(dev); nv_start_tx(dev); netif_start_queue(dev); - if (oom) - mod_timer(&np->oom_kick, jiffies + OOM_REFILL); - if (mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ) & BMSR_ANEGCOMPLETE) { + if (ret) { netif_carrier_on(dev); } else { printk("%s: no link during initialization.\n", dev->name); netif_carrier_off(dev); } - + if (oom) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); spin_unlock_irq(&np->lock); return 0; @@ -1448,6 +1734,14 @@ goto out_relreg; } + /* handle different descriptor versions */ + if (pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_1 || + pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_2 || + pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_3) + np->desc_ver = DESC_VER_1; + else + np->desc_ver = DESC_VER_2; + err = -ENOMEM; dev->base_addr = (unsigned long) ioremap(addr, NV_PCI_REGSZ); if (!dev->base_addr) @@ -1507,9 +1801,15 @@ writel(0, base + NvRegWakeUpFlags); np->wolenabled = 0; - np->tx_flags = cpu_to_le16(NV_TX_LASTPACKET|NV_TX_LASTPACKET1|NV_TX_VALID); - if (id->driver_data & DEV_NEED_LASTPACKET1) - np->tx_flags |= cpu_to_le16(NV_TX_LASTPACKET1); + if (np->desc_ver == DESC_VER_1) { + np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID; + if (id->driver_data & DEV_NEED_LASTPACKET1) + np->tx_flags |= NV_TX_LASTPACKET1; + } else { + np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID; + if (id->driver_data & DEV_NEED_LASTPACKET1) + np->tx_flags |= NV_TX2_LASTPACKET1; + } if (id->driver_data & DEV_IRQMASK_1) np->irqmask = NVREG_IRQMASK_WANTED_1; if (id->driver_data & DEV_IRQMASK_2) @@ -1517,6 +1817,42 @@ if (id->driver_data & DEV_NEED_TIMERIRQ) np->irqmask |= NVREG_IRQ_TIMER; + /* find a suitable phy */ + for (i = 1; i < 32; i++) { + int id1, id2; + + spin_lock_irq(&np->lock); + id1 = mii_rw(dev, i, MII_PHYSID1, MII_READ); + spin_unlock_irq(&np->lock); + if (id1 < 0 || id1 == 0xffff) + continue; + spin_lock_irq(&np->lock); + id2 = mii_rw(dev, i, MII_PHYSID2, MII_READ); + spin_unlock_irq(&np->lock); + if (id2 < 0 || id2 == 0xffff) + continue; + + id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; + id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; + dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", + pci_name(pci_dev), id1, id2, i); + np->phyaddr = i; + np->phy_oui = id1 | id2; + break; + } + if (i == 32) { + /* PHY in isolate mode? No phy attached and user wants to + * test loopback? Very odd, but can be correct. 
+ */ + printk(KERN_INFO "%s: open: Could not find a valid PHY.\n", + pci_name(pci_dev)); + } + + if (i != 32) { + /* reset it */ + phy_init(dev); + } + err = register_netdev(dev); if (err) { printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err); @@ -1570,21 +1906,77 @@ static struct pci_device_id pci_tbl[] = { { /* nForce Ethernet Controller */ .vendor = PCI_VENDOR_ID_NVIDIA, - .device = 0x1C3, + .device = PCI_DEVICE_ID_NVIDIA_NVENET_1, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = DEV_IRQMASK_1|DEV_NEED_TIMERIRQ, }, { /* nForce2 Ethernet Controller */ .vendor = PCI_VENDOR_ID_NVIDIA, - .device = 0x0066, + .device = PCI_DEVICE_ID_NVIDIA_NVENET_2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ, + }, + { /* nForce3 Ethernet Controller */ + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_DEVICE_ID_NVIDIA_NVENET_3, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ, + }, + { /* nForce3 Ethernet Controller */ + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_DEVICE_ID_NVIDIA_NVENET_4, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ, + }, + { /* nForce3 Ethernet Controller */ + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_DEVICE_ID_NVIDIA_NVENET_5, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ, }, { /* nForce3 Ethernet Controller */ .vendor = PCI_VENDOR_ID_NVIDIA, - .device = 0x00D6, + .device = PCI_DEVICE_ID_NVIDIA_NVENET_6, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ, + }, + { /* nForce3 Ethernet Controller */ + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_DEVICE_ID_NVIDIA_NVENET_7, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ, + }, + { /* CK804 Ethernet Controller */ + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_DEVICE_ID_NVIDIA_NVENET_8, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ, + }, + { /* CK804 Ethernet Controller */ + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_DEVICE_ID_NVIDIA_NVENET_9, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ, + }, + { /* MCP04 Ethernet Controller */ + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_DEVICE_ID_NVIDIA_NVENET_10, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ, + }, + { /* MCP04 Ethernet Controller */ + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_DEVICE_ID_NVIDIA_NVENET_11, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ, @@ -1611,9 +2003,9 @@ pci_unregister_driver(&driver); } -MODULE_PARM(max_interrupt_work, "i"); +module_param(max_interrupt_work, int, 0); MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt"); - + MODULE_AUTHOR("Manfred Spraul "); MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); MODULE_LICENSE("GPL"); diff -Nru a/drivers/net/gianfar.c b/drivers/net/gianfar.c --- /dev/null Wed Dec 31 16:00:00 196900 +++ b/drivers/net/gianfar.c 2004-07-13 12:46:19 -07:00 @@ -0,0 +1,1921 @@ +/* + * 
drivers/net/gianfar.c + * + * Gianfar Ethernet Driver + * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 + * Based on 8260_io/fcc_enet.c + * + * Author: Andy Fleming + * Maintainer: Kumar Gala (kumar.gala@freescale.com) + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Gianfar: AKA Lambda Draconis, "Dragon" + * RA 11 31 24.2 + * Dec +69 19 52 + * V 3.84 + * B-V +1.62 + * + * Theory of operation + * This driver is designed for the Triple-speed Ethernet + * controllers on the Freescale 8540/8560 integrated processors, + * as well as the Fast Ethernet Controller on the 8540. + * + * The driver is initialized through OCP. Structures which + * define the configuration needed by the board are defined in a + * board structure in arch/ppc/platforms (though I do not + * discount the possibility that other architectures could one + * day be supported). One assumption the driver currently makes + * is that the PHY is configured in such a way as to advertise all + * capabilities. This is a sensible default, and on certain + * PHYs, changing this default encounters substantial errata + * issues. Future versions may remove this requirement, but for + * now, it is best for the firmware to ensure this is the case. + * + * The Gianfar Ethernet Controller uses a ring of buffer + * descriptors. The beginning is indicated by a register + * pointing to the physical address of the start of the ring. + * The end is determined by a "wrap" bit being set in the + * last descriptor of the ring. + * + * When a packet is received, the RXF bit in the + * IEVENT register is set, triggering an interrupt when the + * corresponding bit in the IMASK register is also set (if + * interrupt coalescing is active, then the interrupt may not + * happen immediately, but will wait until either a set number + * of frames or amount of time has passed). In NAPI, the + * interrupt handler will signal there is work to be done, and + * exit. Without NAPI, the packet(s) will be handled + * immediately. Both methods will start at the last known empty + * descriptor, and process every subsequent descriptor until there + * are none left with data (NAPI will stop after a set number of + * packets to give time to other tasks, but will eventually + * process all the packets). The data arrives inside a + * pre-allocated skb, and so after the skb is passed up to the + * stack, a new skb must be allocated, and the address field in + * the buffer descriptor must be updated to indicate this new + * skb. + * + * When the kernel requests that a packet be transmitted, the + * driver starts where it left off last time, and points the + * descriptor at the buffer which was passed in. The driver + * then informs the DMA engine that there are packets ready to + * be transmitted. Once the controller is finished transmitting + * the packet, an interrupt may be triggered (under the same + * conditions as for reception, but depending on the TXF bit). + * The driver then cleans up the buffer.
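+ *
+ * As a rough sketch of the receive path just described (an editorial
+ * illustration with simplified names, not code from this driver --
+ * the real logic lives in gfar_clean_rx_ring(); RXBD_EMPTY stands
+ * for the descriptor's "owned by hardware" bit):
+ *
+ *	bdp = priv->cur_rx;
+ *	while (!(bdp->status & RXBD_EMPTY)) {
+ *		pass the skb attached to bdp up the stack;
+ *		attach a freshly allocated skb to bdp;
+ *		bdp->status |= RXBD_EMPTY;	(give it back to the hardware)
+ *		bdp = (bdp->status & RXBD_WRAP) ? priv->rx_bd_base : bdp + 1;
+ *	}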
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "gianfar.h" +#include "gianfar_phy.h" +#ifdef CONFIG_NET_FASTROUTE +#include +#include +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,41) +#define irqreturn_t void +#define IRQ_HANDLED +#endif + +#define TX_TIMEOUT (1*HZ) +#define SKB_ALLOC_TIMEOUT 1000000 +#undef BRIEF_GFAR_ERRORS +#undef VERBOSE_GFAR_ERRORS + +#ifdef CONFIG_GFAR_NAPI +#define RECEIVE(x) netif_receive_skb(x) +#else +#define RECEIVE(x) netif_rx(x) +#endif + +#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.0, " +char gfar_driver_name[] = "Gianfar Ethernet"; +char gfar_driver_version[] = "1.0"; + +int startup_gfar(struct net_device *dev); +static int gfar_enet_open(struct net_device *dev); +static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); +static void gfar_timeout(struct net_device *dev); +static int gfar_close(struct net_device *dev); +struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp); +static struct net_device_stats *gfar_get_stats(struct net_device *dev); +static int gfar_set_mac_address(struct net_device *dev); +static int gfar_change_mtu(struct net_device *dev, int new_mtu); +static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs); +static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs); +irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs); +static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs); +static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs); +static void gfar_phy_change(void *data); +static void gfar_phy_timer(unsigned long data); +static void adjust_link(struct net_device *dev); +static void init_registers(struct net_device *dev); +static int init_phy(struct net_device *dev); +static int gfar_probe(struct ocp_device *ocpdev); +static void gfar_remove(struct ocp_device *ocpdev); +void free_skb_resources(struct gfar_private *priv); +static void gfar_set_multi(struct net_device *dev); +static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); +#ifdef CONFIG_GFAR_NAPI +static int gfar_poll(struct net_device *dev, int *budget); +#endif +#ifdef CONFIG_NET_FASTROUTE +static int gfar_accept_fastpath(struct net_device *dev, struct dst_entry *dst); +#endif +static inline int try_fastroute(struct sk_buff *skb, struct net_device *dev, int length); +#ifdef CONFIG_GFAR_NAPI +static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); +#else +static int gfar_clean_rx_ring(struct net_device *dev); +#endif +static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length); + +extern struct ethtool_ops gfar_ethtool_ops; +extern void gfar_gstrings_normon(struct net_device *dev, u32 stringset, + u8 * buf); +extern void gfar_fill_stats_normon(struct net_device *dev, + struct ethtool_stats *dummy, u64 * buf); +extern int gfar_stats_count_normon(struct net_device *dev); + + +MODULE_AUTHOR("Freescale Semiconductor, Inc"); +MODULE_DESCRIPTION("Gianfar Ethernet Driver"); +MODULE_LICENSE("GPL"); + +/* Called by the ocp code to initialize device data structures + * required for bringing up the device + * returns 0 on success */ +static int gfar_probe(struct ocp_device *ocpdev) +{ + u32 tempval; + struct ocp_device *mdiodev; + struct net_device *dev = NULL; + struct gfar_private *priv = 
NULL; + struct ocp_gfar_data *einfo; + int idx; + int err = 0; + struct ethtool_ops *dev_ethtool_ops; + + einfo = (struct ocp_gfar_data *) ocpdev->def->additions; + + if (einfo == NULL) { + printk(KERN_ERR "gfar %d: Missing additional data!\n", + ocpdev->def->index); + + return -ENODEV; + } + + /* get a pointer to the register memory which can + * configure the PHYs. If it's different from this set, + * get the device which has those regs */ + if ((einfo->phyregidx >= 0) && (einfo->phyregidx != ocpdev->def->index)) { + mdiodev = ocp_find_device(OCP_ANY_ID, + OCP_FUNC_GFAR, einfo->phyregidx); + + /* If the device which holds the MDIO regs isn't + * up, wait for it to come up */ + if (mdiodev == NULL) + return -EAGAIN; + } else { + mdiodev = ocpdev; + } + + /* Create an ethernet device instance */ + dev = alloc_etherdev(sizeof (*priv)); + + if (dev == NULL) + return -ENOMEM; + + priv = netdev_priv(dev); + + /* Set the info in the priv to the current info */ + priv->einfo = einfo; + + /* get a pointer to the register memory */ + priv->regs = (struct gfar *) + ioremap(ocpdev->def->paddr, sizeof (struct gfar)); + + if (priv->regs == NULL) { + err = -ENOMEM; + goto regs_fail; + } + + /* Set the PHY base address */ + priv->phyregs = (struct gfar *) + ioremap(mdiodev->def->paddr, sizeof (struct gfar)); + + if (priv->phyregs == NULL) { + err = -ENOMEM; + goto phy_regs_fail; + } + + ocp_set_drvdata(ocpdev, dev); + + /* Stop the DMA engine now, in case it was running before */ + /* (The firmware could have used it, and left it running). */ + /* To do this, we write Graceful Receive Stop and Graceful */ + /* Transmit Stop, and then wait until the corresponding bits */ + /* in IEVENT indicate the stops have completed. */ + tempval = gfar_read(&priv->regs->dmactrl); + tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); + gfar_write(&priv->regs->dmactrl, tempval); + + tempval = gfar_read(&priv->regs->dmactrl); + tempval |= (DMACTRL_GRS | DMACTRL_GTS); + gfar_write(&priv->regs->dmactrl, tempval); + + while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC))) + cpu_relax(); + + /* Reset MAC layer */ + gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET); + + tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + gfar_write(&priv->regs->maccfg1, tempval); + + /* Initialize MACCFG2. */ + gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS); + + /* Initialize ECNTRL */ + gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS); + + /* Copy the station address into the dev structure, */ + /* and into the address registers MAC_STNADDR1,2. */ + /* Backwards, because little endian MACs are dumb. 
*/ + /* Don't set the regs if the firmware already did */ + memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN); + + /* Set the dev->base_addr to the gfar reg region */ + dev->base_addr = (unsigned long) (priv->regs); + + SET_MODULE_OWNER(dev); + + /* Fill in the dev structure */ + dev->open = gfar_enet_open; + dev->hard_start_xmit = gfar_start_xmit; + dev->tx_timeout = gfar_timeout; + dev->watchdog_timeo = TX_TIMEOUT; +#ifdef CONFIG_GFAR_NAPI + dev->poll = gfar_poll; + dev->weight = GFAR_DEV_WEIGHT; +#endif + dev->stop = gfar_close; + dev->get_stats = gfar_get_stats; + dev->change_mtu = gfar_change_mtu; + dev->mtu = 1500; + dev->set_multicast_list = gfar_set_multi; + dev->flags |= IFF_MULTICAST; + + dev_ethtool_ops = + (struct ethtool_ops *)kmalloc(sizeof(struct ethtool_ops), + GFP_KERNEL); + + if(dev_ethtool_ops == NULL) { + err = -ENOMEM; + goto ethtool_fail; + } + + memcpy(dev_ethtool_ops, &gfar_ethtool_ops, sizeof(gfar_ethtool_ops)); + + /* If there is no RMON support in this device, we don't + * want to expose non-existent statistics */ + if((priv->einfo->flags & GFAR_HAS_RMON) == 0) { + dev_ethtool_ops->get_strings = gfar_gstrings_normon; + dev_ethtool_ops->get_stats_count = gfar_stats_count_normon; + dev_ethtool_ops->get_ethtool_stats = gfar_fill_stats_normon; + } + + if((priv->einfo->flags & GFAR_HAS_COALESCE) == 0) { + dev_ethtool_ops->set_coalesce = NULL; + dev_ethtool_ops->get_coalesce = NULL; + } + + dev->ethtool_ops = dev_ethtool_ops; + +#ifdef CONFIG_NET_FASTROUTE + dev->accept_fastpath = gfar_accept_fastpath; +#endif + + priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; +#ifdef CONFIG_GFAR_BUFSTASH + priv->rx_stash_size = STASH_LENGTH; +#endif + priv->tx_ring_size = DEFAULT_TX_RING_SIZE; + priv->rx_ring_size = DEFAULT_RX_RING_SIZE; + + /* Initially, coalescing is disabled */ + priv->txcoalescing = 0; + priv->txcount = 0; + priv->txtime = 0; + priv->rxcoalescing = 0; + priv->rxcount = 0; + priv->rxtime = 0; + + err = register_netdev(dev); + + if (err) { + printk(KERN_ERR "%s: Cannot register net device, aborting.\n", + dev->name); + goto register_fail; + } + + /* Print out the device info */ + printk(DEVICE_NAME, dev->name); + for (idx = 0; idx < 6; idx++) + printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':'); + printk("\n"); + + /* Even more device info helps when determining which kernel */ + /* provided which set of benchmarks. Since this is global for all */ + /* devices, we only print it once */ +#ifdef CONFIG_GFAR_NAPI + printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); +#else + printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name); +#endif + printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", + dev->name, priv->rx_ring_size, priv->tx_ring_size); + + return 0; + + +register_fail: + kfree(dev_ethtool_ops); +ethtool_fail: + iounmap((void *) priv->phyregs); +phy_regs_fail: + iounmap((void *) priv->regs); +regs_fail: + free_netdev(dev); + return -ENOMEM; +} + +static void gfar_remove(struct ocp_device *ocpdev) +{ + struct net_device *dev = ocp_get_drvdata(ocpdev); + struct gfar_private *priv = netdev_priv(dev); + + ocp_set_drvdata(ocpdev, NULL); + + kfree(dev->ethtool_ops); + iounmap((void *) priv->regs); + iounmap((void *) priv->phyregs); + free_netdev(dev); +} + +/* Configure the PHY for dev. + * returns 0 if success.
-1 if failure + */ +static int init_phy(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_info *curphy; + + priv->link = 1; + priv->oldlink = 0; + priv->oldspeed = 0; + priv->olddplx = -1; + + /* get info for this PHY */ + curphy = get_phy_info(dev); + + if (curphy == NULL) { + printk(KERN_ERR "%s: No PHY found\n", dev->name); + return -1; + } + + priv->phyinfo = curphy; + + /* Run the commands which configure the PHY */ + phy_run_commands(dev, curphy->config); + + return 0; +} + +static void init_registers(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + /* Clear IEVENT */ + gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR); + + /* Initialize IMASK */ + gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR); + + /* Init hash registers to zero */ + gfar_write(&priv->regs->iaddr0, 0); + gfar_write(&priv->regs->iaddr1, 0); + gfar_write(&priv->regs->iaddr2, 0); + gfar_write(&priv->regs->iaddr3, 0); + gfar_write(&priv->regs->iaddr4, 0); + gfar_write(&priv->regs->iaddr5, 0); + gfar_write(&priv->regs->iaddr6, 0); + gfar_write(&priv->regs->iaddr7, 0); + + gfar_write(&priv->regs->gaddr0, 0); + gfar_write(&priv->regs->gaddr1, 0); + gfar_write(&priv->regs->gaddr2, 0); + gfar_write(&priv->regs->gaddr3, 0); + gfar_write(&priv->regs->gaddr4, 0); + gfar_write(&priv->regs->gaddr5, 0); + gfar_write(&priv->regs->gaddr6, 0); + gfar_write(&priv->regs->gaddr7, 0); + + /* Zero out rctrl */ + gfar_write(&priv->regs->rctrl, 0x00000000); + + /* Zero out the rmon mib registers if it has them */ + if (priv->einfo->flags & GFAR_HAS_RMON) { + memset((void *) &(priv->regs->rmon), 0, + sizeof (struct rmon_mib)); + + /* Mask off the CAM interrupts */ + gfar_write(&priv->regs->rmon.cam1, 0xffffffff); + gfar_write(&priv->regs->rmon.cam2, 0xffffffff); + } + + /* Initialize the max receive buffer length */ + gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); + +#ifdef CONFIG_GFAR_BUFSTASH + /* If we are stashing buffers, we need to set the + * extraction length to the size of the buffer */ + gfar_write(&priv->regs->attreli, priv->rx_stash_size << 16); +#endif + + /* Initialize the Minimum Frame Length Register */ + gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); + + /* Setup Attributes so that snooping is on for rx */ + gfar_write(&priv->regs->attr, ATTR_INIT_SETTINGS); + gfar_write(&priv->regs->attreli, ATTRELI_INIT_SETTINGS); + + /* Assign the TBI an address which won't conflict with the PHYs */ + gfar_write(&priv->regs->tbipa, TBIPA_VALUE); +} + +void stop_gfar(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar *regs = priv->regs; + unsigned long flags; + u32 tempval; + + /* Lock it down */ + spin_lock_irqsave(&priv->lock, flags); + + /* Tell the kernel the link is down */ + priv->link = 0; + adjust_link(dev); + + /* Mask all interrupts */ + gfar_write(&regs->imask, IMASK_INIT_CLEAR); + + /* Clear all interrupts */ + gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); + + /* Stop the DMA, and wait for it to stop */ + tempval = gfar_read(&priv->regs->dmactrl); + if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) + != (DMACTRL_GRS | DMACTRL_GTS)) { + tempval |= (DMACTRL_GRS | DMACTRL_GTS); + gfar_write(&priv->regs->dmactrl, tempval); + + while (!(gfar_read(&priv->regs->ievent) & + (IEVENT_GRSC | IEVENT_GTSC))) + cpu_relax(); + } + + /* Disable Rx and Tx */ + tempval = gfar_read(&regs->maccfg1); + tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); + gfar_write(&regs->maccfg1, tempval); + + if (priv->einfo->flags & GFAR_HAS_PHY_INTR) { +
phy_run_commands(dev, priv->phyinfo->shutdown); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Free the IRQs */ + if (priv->einfo->flags & GFAR_HAS_MULTI_INTR) { + free_irq(priv->einfo->interruptError, dev); + free_irq(priv->einfo->interruptTransmit, dev); + free_irq(priv->einfo->interruptReceive, dev); + } else { + free_irq(priv->einfo->interruptTransmit, dev); + } + + if (priv->einfo->flags & GFAR_HAS_PHY_INTR) { + free_irq(priv->einfo->interruptPHY, dev); + } else { + del_timer_sync(&priv->phy_info_timer); + } + + free_skb_resources(priv); + + dma_unmap_single(NULL, gfar_read(&regs->tbase), + sizeof(struct txbd)*priv->tx_ring_size, + DMA_BIDIRECTIONAL); + dma_unmap_single(NULL, gfar_read(&regs->rbase), + sizeof(struct rxbd)*priv->rx_ring_size, + DMA_BIDIRECTIONAL); + + /* Free the buffer descriptors */ + kfree(priv->tx_bd_base); +} + +/* If there are any tx skbs or rx skbs still around, free them. + * Then free tx_skbuff and rx_skbuff */ +void free_skb_resources(struct gfar_private *priv) +{ + struct rxbd8 *rxbdp; + struct txbd8 *txbdp; + int i; + + /* Go through all the buffer descriptors and free their data buffers */ + txbdp = priv->tx_bd_base; + + for (i = 0; i < priv->tx_ring_size; i++) { + + if (priv->tx_skbuff[i]) { + dma_unmap_single(NULL, txbdp->bufPtr, + txbdp->length, + DMA_TO_DEVICE); + dev_kfree_skb_any(priv->tx_skbuff[i]); + priv->tx_skbuff[i] = NULL; + } + } + + kfree(priv->tx_skbuff); + + rxbdp = priv->rx_bd_base; + + /* rx_skbuff is not guaranteed to be allocated, so only + * free it and its contents if it is allocated */ + if(priv->rx_skbuff != NULL) { + for (i = 0; i < priv->rx_ring_size; i++) { + if (priv->rx_skbuff[i]) { + dma_unmap_single(NULL, rxbdp->bufPtr, + priv->rx_buffer_size + + RXBUF_ALIGNMENT, + DMA_FROM_DEVICE); + + dev_kfree_skb_any(priv->rx_skbuff[i]); + priv->rx_skbuff[i] = NULL; + } + + rxbdp->status = 0; + rxbdp->length = 0; + rxbdp->bufPtr = 0; + + rxbdp++; + } + + kfree(priv->rx_skbuff); + } +} + +/* Bring the controller up and running */ +int startup_gfar(struct net_device *dev) +{ + struct txbd8 *txbdp; + struct rxbd8 *rxbdp; + unsigned long addr; + int i; + struct gfar_private *priv = netdev_priv(dev); + struct gfar *regs = priv->regs; + u32 tempval; + int err = 0; + + gfar_write(&regs->imask, IMASK_INIT_CLEAR); + + /* Allocate memory for the buffer descriptors */ + addr = + (unsigned int) kmalloc(sizeof (struct txbd8) * priv->tx_ring_size + + sizeof (struct rxbd8) * priv->rx_ring_size, + GFP_KERNEL); + + if (addr == 0) { + printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n", + dev->name); + return -ENOMEM; + } + + priv->tx_bd_base = (struct txbd8 *) addr; + + /* enet DMA only understands physical addresses */ + gfar_write(&regs->tbase, + dma_map_single(NULL, (void *)addr, + sizeof(struct txbd8) * priv->tx_ring_size, + DMA_BIDIRECTIONAL)); + + /* Start the rx descriptor ring where the tx ring leaves off */ + addr = addr + sizeof (struct txbd8) * priv->tx_ring_size; + priv->rx_bd_base = (struct rxbd8 *) addr; + gfar_write(&regs->rbase, + dma_map_single(NULL, (void *)addr, + sizeof(struct rxbd8) * priv->rx_ring_size, + DMA_BIDIRECTIONAL)); + + /* Setup the skbuff rings */ + priv->tx_skbuff = + (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * + priv->tx_ring_size, GFP_KERNEL); + + if (priv->tx_skbuff == NULL) { + printk(KERN_ERR "%s: Could not allocate tx_skbuff\n", + dev->name); + err = -ENOMEM; + goto tx_skb_fail; + } + + for (i = 0; i < priv->tx_ring_size; i++) + priv->tx_skbuff[i] = NULL; + + priv->rx_skbuff = + (struct sk_buff
**) kmalloc(sizeof (struct sk_buff *) * + priv->rx_ring_size, GFP_KERNEL); + + if (priv->rx_skbuff == NULL) { + printk(KERN_ERR "%s: Could not allocate rx_skbuff\n", + dev->name); + err = -ENOMEM; + goto rx_skb_fail; + } + + for (i = 0; i < priv->rx_ring_size; i++) + priv->rx_skbuff[i] = NULL; + + /* Initialize some variables in our dev structure */ + priv->dirty_tx = priv->cur_tx = priv->tx_bd_base; + priv->cur_rx = priv->rx_bd_base; + priv->skb_curtx = priv->skb_dirtytx = 0; + priv->skb_currx = 0; + + /* Initialize Transmit Descriptor Ring */ + txbdp = priv->tx_bd_base; + for (i = 0; i < priv->tx_ring_size; i++) { + txbdp->status = 0; + txbdp->length = 0; + txbdp->bufPtr = 0; + txbdp++; + } + + /* Set the last descriptor in the ring to indicate wrap */ + txbdp--; + txbdp->status |= TXBD_WRAP; + + rxbdp = priv->rx_bd_base; + for (i = 0; i < priv->rx_ring_size; i++) { + struct sk_buff *skb = NULL; + + rxbdp->status = 0; + + skb = gfar_new_skb(dev, rxbdp); + + priv->rx_skbuff[i] = skb; + + rxbdp++; + } + + /* Set the last descriptor in the ring to wrap */ + rxbdp--; + rxbdp->status |= RXBD_WRAP; + + /* If the device has multiple interrupts, register for + * them. Otherwise, only register for the one */ + if (priv->einfo->flags & GFAR_HAS_MULTI_INTR) { + /* Install our interrupt handlers for Error, + * Transmit, and Receive */ + if (request_irq(priv->einfo->interruptError, gfar_error, + 0, "enet_error", dev) < 0) { + printk(KERN_ERR "%s: Can't get IRQ %d\n", + dev->name, priv->einfo->interruptError); + + err = -1; + goto err_irq_fail; + } + + if (request_irq(priv->einfo->interruptTransmit, gfar_transmit, + 0, "enet_tx", dev) < 0) { + printk(KERN_ERR "%s: Can't get IRQ %d\n", + dev->name, priv->einfo->interruptTransmit); + + err = -1; + + goto tx_irq_fail; + } + + if (request_irq(priv->einfo->interruptReceive, gfar_receive, + 0, "enet_rx", dev) < 0) { + printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n", + dev->name, priv->einfo->interruptReceive); + + err = -1; + goto rx_irq_fail; + } + } else { + if (request_irq(priv->einfo->interruptTransmit, gfar_interrupt, + 0, "gfar_interrupt", dev) < 0) { + printk(KERN_ERR "%s: Can't get IRQ %d\n", + dev->name, priv->einfo->interruptError); + + err = -1; + goto err_irq_fail; + } + } + + /* Grab the PHY interrupt */ + if (priv->einfo->flags & GFAR_HAS_PHY_INTR) { + if (request_irq(priv->einfo->interruptPHY, phy_interrupt, + SA_SHIRQ, "phy_interrupt", dev) < 0) { + printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n", + dev->name, priv->einfo->interruptPHY); + + err = -1; + + if (priv->einfo->flags & GFAR_HAS_MULTI_INTR) + goto phy_irq_fail; + else + goto tx_irq_fail; + } + } else { + init_timer(&priv->phy_info_timer); + priv->phy_info_timer.function = &gfar_phy_timer; + priv->phy_info_timer.data = (unsigned long) dev; + mod_timer(&priv->phy_info_timer, jiffies + 2 * HZ); + } + + /* Set up the bottom half queue */ + INIT_WORK(&priv->tq, (void (*)(void *))gfar_phy_change, dev); + + /* Configure the PHY interrupt */ + phy_run_commands(dev, priv->phyinfo->startup); + + /* Tell the kernel the link is up, and determine the + * negotiated features (speed, duplex) */ + adjust_link(dev); + + if (priv->link == 0) + printk(KERN_INFO "%s: No link detected\n", dev->name); + + /* Configure the coalescing support */ + if (priv->txcoalescing) + gfar_write(&regs->txic, + mk_ic_value(priv->txcount, priv->txtime)); + else + gfar_write(&regs->txic, 0); + + if (priv->rxcoalescing) + gfar_write(&regs->rxic, + mk_ic_value(priv->rxcount, priv->rxtime)); + else + gfar_write(&regs->rxic, 0); + +
init_waitqueue_head(&priv->rxcleanupq); + + /* Enable Rx and Tx in MACCFG1 */ + tempval = gfar_read(&regs->maccfg1); + tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); + gfar_write(&regs->maccfg1, tempval); + + /* Initialize DMACTRL to have WWR and WOP */ + tempval = gfar_read(&priv->regs->dmactrl); + tempval |= DMACTRL_INIT_SETTINGS; + gfar_write(&priv->regs->dmactrl, tempval); + + /* Clear THLT, so that the DMA starts polling now */ + gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); + + /* Make sure we aren't stopped */ + tempval = gfar_read(&priv->regs->dmactrl); + tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); + gfar_write(&priv->regs->dmactrl, tempval); + + /* Unmask the interrupts we look for */ + gfar_write(&regs->imask, IMASK_DEFAULT); + + return 0; + +phy_irq_fail: + free_irq(priv->einfo->interruptReceive, dev); +rx_irq_fail: + free_irq(priv->einfo->interruptTransmit, dev); +tx_irq_fail: + free_irq(priv->einfo->interruptError, dev); +err_irq_fail: +rx_skb_fail: + free_skb_resources(priv); +tx_skb_fail: + kfree(priv->tx_bd_base); + return err; +} + +/* Called when something needs to use the ethernet device */ +/* Returns 0 for success. */ +static int gfar_enet_open(struct net_device *dev) +{ + int err; + + /* Initialize a bunch of registers */ + init_registers(dev); + + gfar_set_mac_address(dev); + + err = init_phy(dev); + + if (err) + return err; + + err = startup_gfar(dev); + + netif_start_queue(dev); + + return err; +} + +/* This is called by the kernel when a frame is ready for transmission. */ +/* It is pointed to by the dev->hard_start_xmit function pointer */ +static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct txbd8 *txbdp; + + /* Update transmit stats */ + priv->stats.tx_bytes += skb->len; + + /* Lock priv now */ + spin_lock_irq(&priv->lock); + + /* Point at the first free tx descriptor */ + txbdp = priv->cur_tx; + + /* Clear all but the WRAP status flags */ + txbdp->status &= TXBD_WRAP; + + /* Set buffer length and pointer */ + txbdp->length = skb->len; + txbdp->bufPtr = dma_map_single(NULL, skb->data, + skb->len, DMA_TO_DEVICE); + + /* Save the skb pointer so we can free it later */ + priv->tx_skbuff[priv->skb_curtx] = skb; + + /* Update the current skb pointer (wrapping if this was the last) */ + priv->skb_curtx = + (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size); + + /* Flag the BD as interrupt-causing */ + txbdp->status |= TXBD_INTERRUPT; + + /* Flag the BD as ready to go, last in frame, and */ + /* in need of CRC */ + txbdp->status |= (TXBD_READY | TXBD_LAST | TXBD_CRC); + + dev->trans_start = jiffies; + + /* If this was the last BD in the ring, the next one */ + /* is at the beginning of the ring */ + if (txbdp->status & TXBD_WRAP) + txbdp = priv->tx_bd_base; + else + txbdp++; +
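/*
 * Editorial note with a worked example: the "& TX_RING_MOD_MASK(size)"
 * index arithmetic used above only works because the ring sizes are
 * powers of 2 (gianfar_ethtool.c enforces this). With tx_ring_size = 64
 * the mask is 63, so the index wraps for free:
 *
 *	(63 + 1) & 63 = 0	(last slot advances back to slot 0)
 *	(10 + 1) & 63 = 11	(all other slots simply increment)
 */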
+ /* If the next BD still needs to be cleaned up, then the bds + * are full. We need to tell the kernel to stop sending us stuff. */ + if (txbdp == priv->dirty_tx) { + netif_stop_queue(dev); + + priv->stats.tx_fifo_errors++; + } + + /* Update the current txbd to the next one */ + priv->cur_tx = txbdp; + + /* Tell the DMA to go go go */ + gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); + + /* Unlock priv */ + spin_unlock_irq(&priv->lock); + + return 0; +} + +/* Stops the kernel queue, and halts the controller */ +static int gfar_close(struct net_device *dev) +{ + stop_gfar(dev); + + netif_stop_queue(dev); + + return 0; +} + +/* returns a net_device_stats structure pointer */ +static struct net_device_stats * gfar_get_stats(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + return &(priv->stats); +} + +/* Changes the mac address if the controller is not running. */ +int gfar_set_mac_address(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + int i; + char tmpbuf[MAC_ADDR_LEN]; + u32 tempval; + + /* Copy it into the mac registers in reverse order, since */ + /* the hardware stores the station address little-endian */ + for (i = 0; i < MAC_ADDR_LEN; i++) + tmpbuf[MAC_ADDR_LEN - 1 - i] = dev->dev_addr[i]; + + gfar_write(&priv->regs->macstnaddr1, *((u32 *) (tmpbuf))); + + tempval = *((u32 *) (tmpbuf + 4)); + + gfar_write(&priv->regs->macstnaddr2, tempval); + + return 0; +} + +/********************************************************************** + * gfar_accept_fastpath + * + * Used to confirm to the kernel that a fast path entry can be + * added to the device's routing table cache + * + * Input : pointer to ethernet interface network device structure and + * a pointer to the designated entry to be added to the cache. + * Output : zero upon success, negative upon failure + **********************************************************************/ +#ifdef CONFIG_NET_FASTROUTE +static int gfar_accept_fastpath(struct net_device *dev, struct dst_entry *dst) +{ + struct net_device *odev = dst->dev; + + if ((dst->ops->protocol != __constant_htons(ETH_P_IP)) + || (odev->type != ARPHRD_ETHER) + || (odev->accept_fastpath == NULL)) { + return -1; + } + + return 0; +} +#endif +
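/*
 * Worked example for gfar_set_mac_address() above (editorial sketch;
 * the address is made up): for dev_addr 00:04:9f:00:11:22, tmpbuf
 * becomes 22 11 00 9f 04 00, so on the big-endian core
 *
 *	macstnaddr1 = 0x2211009f
 *	macstnaddr2 = 0x0400xxxx
 *
 * Note the second load reads two bytes past the 6-byte tmpbuf; the
 * hardware presumably consumes only the upper halfword of macstnaddr2,
 * but the store would be cleaner with an 8-byte buffer.
 */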
/* try_fastroute() -- Checks the fastroute cache to see if a given packet + * can be routed immediately to another device. If it can, we send it. + * If we used a fastroute, we return 1. Otherwise, we return 0. + * Returns 0 if CONFIG_NET_FASTROUTE is not on + */ +static inline int try_fastroute(struct sk_buff *skb, struct net_device *dev, int length) +{ +#ifdef CONFIG_NET_FASTROUTE + struct ethhdr *eth; + struct iphdr *iph; + unsigned int hash; + struct rtable *rt; + struct net_device *odev; + struct gfar_private *priv = netdev_priv(dev); + unsigned int CPU_ID = smp_processor_id(); + + eth = (struct ethhdr *) (skb->data); + + /* Only route ethernet IP packets */ + if (eth->h_proto == __constant_htons(ETH_P_IP)) { + iph = (struct iphdr *) (skb->data + ETH_HLEN); + + /* Generate the hash value */ + hash = ((*(u8 *) &iph->daddr) ^ (*(u8 *) &iph->saddr)) & NETDEV_FASTROUTE_HMASK; + + rt = (struct rtable *) (dev->fastpath[hash]); + if (rt != NULL + && ((*(u32 *) &iph->daddr) == (*(u32 *) &rt->key.dst)) + && ((*(u32 *) &iph->saddr) == (*(u32 *) &rt->key.src)) + && !(rt->u.dst.obsolete)) { + odev = rt->u.dst.dev; + netdev_rx_stat[CPU_ID].fastroute_hit++; + + /* Make sure the packet is: + * 1) IPv4 + * 2) without any options (header length of 5) + * 3) Not a multicast packet + * 4) going to a valid destination + * 5) Not out of time-to-live + */ + if (iph->version == 4 + && iph->ihl == 5 + && (!(eth->h_dest[0] & 0x01)) + && neigh_is_valid(rt->u.dst.neighbour) + && iph->ttl > 1) { + + /* Fast Route Path: Taken if the outgoing device is ready to transmit the packet now */ + if ((!netif_queue_stopped(odev)) + && (!spin_is_locked(&odev->xmit_lock)) + && (skb->len <= (odev->mtu + ETH_HLEN + 2 + 4))) { + + skb->pkt_type = PACKET_FASTROUTE; + skb->protocol = __constant_htons(ETH_P_IP); + ip_decrease_ttl(iph); + memcpy(eth->h_source, odev->dev_addr, MAC_ADDR_LEN); + memcpy(eth->h_dest, rt->u.dst.neighbour->ha, MAC_ADDR_LEN); + skb->dev = odev; + + /* Prep the skb for the packet */ + skb_put(skb, length); + + if (odev->hard_start_xmit(skb, odev) != 0) { + panic("%s: FastRoute path corrupted", dev->name); + } + netdev_rx_stat[CPU_ID].fastroute_success++; + } + + /* Semi Fast Route Path: Mark the packet as needing fast routing, but let the + * stack handle getting it to the device */ + else { + skb->pkt_type = PACKET_FASTROUTE; + skb->nh.raw = skb->data + ETH_HLEN; + skb->protocol = __constant_htons(ETH_P_IP); + netdev_rx_stat[CPU_ID].fastroute_defer++; + + /* Prep the skb for the packet */ + skb_put(skb, length); + + if (RECEIVE(skb) == NET_RX_DROP) { + priv->extra_stats.kernel_dropped++; + } + } + + return 1; + } + } + } +#endif /* CONFIG_NET_FASTROUTE */ + return 0; +} + +static int gfar_change_mtu(struct net_device *dev, int new_mtu) +{ + int tempsize, tempval; + struct gfar_private *priv = netdev_priv(dev); + int oldsize = priv->rx_buffer_size; + int frame_size = new_mtu + 18; + + if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { + printk(KERN_ERR "%s: Invalid MTU setting\n", dev->name); + return -EINVAL; + } + + tempsize = + (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + + INCREMENTAL_BUFFER_SIZE; + + /* Only stop and start the controller if it isn't already + * stopped */ + if ((oldsize != tempsize) && (dev->flags & IFF_UP)) + stop_gfar(dev); + + priv->rx_buffer_size = tempsize; + + dev->mtu = new_mtu; + + gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); + gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size); + + /* If the mtu is larger than the max size for standard + * ethernet frames (ie, a jumbo frame), then set maccfg2 + * to allow huge frames, and to check the length */ + tempval = gfar_read(&priv->regs->maccfg2); + + if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) + tempval |= 
(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); + else + tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); + + gfar_write(&priv->regs->maccfg2, tempval); + + if ((oldsize != tempsize) && (dev->flags & IFF_UP)) + startup_gfar(dev); + + return 0; +} + +/* gfar_timeout gets called when a packet has not been + * transmitted after a set amount of time. + * For now, assume that clearing out all the structures, and + * starting over will fix the problem. */ +static void gfar_timeout(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + priv->stats.tx_errors++; + + if (dev->flags & IFF_UP) { + stop_gfar(dev); + startup_gfar(dev); + } + + if (!netif_queue_stopped(dev)) + netif_schedule(dev); +} + +/* Interrupt Handler for Transmit complete */ +static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct gfar_private *priv = netdev_priv(dev); + struct txbd8 *bdp; + + /* Clear IEVENT */ + gfar_write(&priv->regs->ievent, IEVENT_TX_MASK); + + /* Lock priv */ + spin_lock(&priv->lock); + bdp = priv->dirty_tx; + while ((bdp->status & TXBD_READY) == 0) { + /* If dirty_tx and cur_tx are the same, then either the */ + /* ring is empty or full now (it could only be full in the beginning, */ + /* obviously). If it is empty, we are done. */ + if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0)) + break; + + priv->stats.tx_packets++; + + /* Deferred means some collisions occurred during transmit, */ + /* but we eventually sent the packet. */ + if (bdp->status & TXBD_DEF) + priv->stats.collisions++; + + /* Free the sk buffer associated with this TxBD */ + dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); + priv->tx_skbuff[priv->skb_dirtytx] = NULL; + priv->skb_dirtytx = + (priv->skb_dirtytx + + 1) & TX_RING_MOD_MASK(priv->tx_ring_size); + + /* update bdp to point at next bd in the ring (wrapping if necessary) */ + if (bdp->status & TXBD_WRAP) + bdp = priv->tx_bd_base; + else + bdp++; + + /* Move dirty_tx to be the next bd */ + priv->dirty_tx = bdp; + + /* We freed a buffer, so now we can restart transmission */ + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + } /* while ((bdp->status & TXBD_READY) == 0) */ + + /* If we are coalescing the interrupts, reset the timer */ + /* Otherwise, clear it */ + if (priv->txcoalescing) + gfar_write(&priv->regs->txic, + mk_ic_value(priv->txcount, priv->txtime)); + else + gfar_write(&priv->regs->txic, 0); + + spin_unlock(&priv->lock); + + return IRQ_HANDLED; +} + +struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) +{ + struct gfar_private *priv = netdev_priv(dev); + struct sk_buff *skb = NULL; + unsigned int timeout = SKB_ALLOC_TIMEOUT; + + /* We have to allocate the skb, so keep trying till we succeed */ + while ((!skb) && timeout--) + skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT); + + if (skb == NULL) + return NULL; + + /* We need the data buffer to be aligned properly. 
We will reserve + * as many bytes as needed to align the data properly + */ + skb_reserve(skb, + RXBUF_ALIGNMENT - + (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1))); + + skb->dev = dev; + + bdp->bufPtr = dma_map_single(NULL, skb->data, + priv->rx_buffer_size + RXBUF_ALIGNMENT, + DMA_FROM_DEVICE); + + bdp->length = 0; + + /* Mark the buffer empty */ + bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT); + + return skb; +} + +static inline void count_errors(unsigned short status, struct gfar_private *priv) +{ + struct net_device_stats *stats = &priv->stats; + struct gfar_extra_stats *estats = &priv->extra_stats; + + /* If the packet was truncated, none of the other errors + * matter */ + if (status & RXBD_TRUNCATED) { + stats->rx_length_errors++; + + estats->rx_trunc++; + + return; + } + /* Count the errors, if there were any */ + if (status & (RXBD_LARGE | RXBD_SHORT)) { + stats->rx_length_errors++; + + if (status & RXBD_LARGE) + estats->rx_large++; + else + estats->rx_short++; + } + if (status & RXBD_NONOCTET) { + stats->rx_frame_errors++; + estats->rx_nonoctet++; + } + if (status & RXBD_CRCERR) { + estats->rx_crcerr++; + stats->rx_crc_errors++; + } + if (status & RXBD_OVERRUN) { + estats->rx_overrun++; + stats->rx_crc_errors++; + } +} + +irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct gfar_private *priv = netdev_priv(dev); + +#ifdef CONFIG_GFAR_NAPI + u32 tempval; +#endif + + /* Clear IEVENT, so rx interrupt isn't called again + * because of this interrupt */ + gfar_write(&priv->regs->ievent, IEVENT_RX_MASK); + + /* support NAPI */ +#ifdef CONFIG_GFAR_NAPI + if (netif_rx_schedule_prep(dev)) { + tempval = gfar_read(&priv->regs->imask); + tempval &= IMASK_RX_DISABLED; + gfar_write(&priv->regs->imask, tempval); + + __netif_rx_schedule(dev); + } else { +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n", + dev->name, gfar_read(&priv->regs->ievent), + gfar_read(&priv->regs->imask)); +#endif + } +#else + + spin_lock(&priv->lock); + gfar_clean_rx_ring(dev); + + /* If we are coalescing interrupts, update the timer */ + /* Otherwise, clear it */ + if (priv->rxcoalescing) + gfar_write(&priv->regs->rxic, + mk_ic_value(priv->rxcount, priv->rxtime)); + else + gfar_write(&priv->regs->rxic, 0); + + /* Just in case we need to wake the ring param changer */ + priv->rxclean = 1; + + spin_unlock(&priv->lock); +#endif + + return IRQ_HANDLED; +} + + +/* gfar_process_frame() -- handle one incoming packet if skb + * isn't NULL. Try the fastroute before using the stack */ +static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, + int length) +{ + struct gfar_private *priv = netdev_priv(dev); + + if (skb == NULL) { +#ifdef BRIEF_GFAR_ERRORS + printk(KERN_WARNING "%s: Missing skb!\n", + dev->name); +#endif + priv->stats.rx_dropped++; + priv->extra_stats.rx_skbmissing++; + } else { + if (try_fastroute(skb, dev, length) == 0) { + /* Prep the skb for the packet */ + skb_put(skb, length); + + /* Tell the skb what kind of packet this is */ + skb->protocol = eth_type_trans(skb, dev); + + /* Send the packet up the stack */ + if (RECEIVE(skb) == NET_RX_DROP) { + priv->extra_stats.kernel_dropped++; + } + } + } + + return 0; +} +
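/*
 * Editorial note: RECEIVE() is assumed to be this driver's dispatch
 * macro, defined earlier in the file (outside this hunk) as
 * netif_receive_skb() under CONFIG_GFAR_NAPI and netif_rx() otherwise.
 * The per-frame flow in gfar_process_frame() is then:
 *
 *	skb_put(skb, length);                      set the data length
 *	skb->protocol = eth_type_trans(skb, dev);  classify, strip MAC header
 *	RECEIVE(skb);                              hand off to the stack
 */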
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring + * until all are gone (or, in the case of NAPI, the budget/quota + * has been reached). Returns the number of frames handled + */ +#ifdef CONFIG_GFAR_NAPI +static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) +#else +static int gfar_clean_rx_ring(struct net_device *dev) +#endif +{ + struct rxbd8 *bdp; + struct sk_buff *skb; + u16 pkt_len; + int howmany = 0; + struct gfar_private *priv = netdev_priv(dev); + + /* Get the first full descriptor */ + bdp = priv->cur_rx; + +#ifdef CONFIG_GFAR_NAPI +#define GFAR_RXDONE() ((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0)) +#else +#define GFAR_RXDONE() (bdp->status & RXBD_EMPTY) +#endif + while (!GFAR_RXDONE()) { + skb = priv->rx_skbuff[priv->skb_currx]; + + if (!(bdp->status & + (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET + | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) { + /* Increment the number of packets */ + priv->stats.rx_packets++; + howmany++; + + /* Remove the FCS from the packet length */ + pkt_len = bdp->length - 4; + + gfar_process_frame(dev, skb, pkt_len); + + priv->stats.rx_bytes += pkt_len; + + } else { + count_errors(bdp->status, priv); + + if (skb) + dev_kfree_skb_any(skb); + + priv->rx_skbuff[priv->skb_currx] = NULL; + } + + dev->last_rx = jiffies; + + /* Clear the status flags for this buffer */ + bdp->status &= ~RXBD_STATS; + + /* Add another skb for the future */ + skb = gfar_new_skb(dev, bdp); + priv->rx_skbuff[priv->skb_currx] = skb; + + /* Update to the next pointer */ + if (bdp->status & RXBD_WRAP) + bdp = priv->rx_bd_base; + else + bdp++; + + /* update to point at the next skb */ + priv->skb_currx = + (priv->skb_currx + + 1) & RX_RING_MOD_MASK(priv->rx_ring_size); + + } + + /* Update the current rxbd pointer to be the next one */ + priv->cur_rx = bdp; + + /* If no packets have arrived since the + * last one we processed, clear the IEVENT RX and + * BSY bits so that another interrupt won't be + * generated when we set IMASK */ + if (bdp->status & RXBD_EMPTY) + gfar_write(&priv->regs->ievent, IEVENT_RX_MASK); + + return howmany; +} + +#ifdef CONFIG_GFAR_NAPI +static int gfar_poll(struct net_device *dev, int *budget) +{ + int howmany; + struct gfar_private *priv = netdev_priv(dev); + int rx_work_limit = *budget; + + if (rx_work_limit > dev->quota) + rx_work_limit = dev->quota; + + spin_lock(&priv->lock); + howmany = gfar_clean_rx_ring(dev, rx_work_limit); + + dev->quota -= howmany; + rx_work_limit -= howmany; + *budget -= howmany; + + if (rx_work_limit >= 0) { + netif_rx_complete(dev); + + /* Clear the halt bit in RSTAT */ + gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); + + gfar_write(&priv->regs->imask, IMASK_DEFAULT); + + /* If we are coalescing interrupts, update the timer */ + /* Otherwise, clear it */ + if (priv->rxcoalescing) + gfar_write(&priv->regs->rxic, + mk_ic_value(priv->rxcount, priv->rxtime)); + else + gfar_write(&priv->regs->rxic, 0); + + /* Signal to the ring size changer that it's safe to go */ + priv->rxclean = 1; + } + + spin_unlock(&priv->lock); + + return (rx_work_limit < 0) ? 1 : 0; +} +#endif +
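/*
 * Worked example of the NAPI accounting above (editorial sketch):
 * suppose *budget = 64, dev->quota = 32, and 10 frames are waiting.
 * rx_work_limit starts at 32 (clamped to the quota), the ring cleaner
 * returns howmany = 10, so the quota becomes 22, *budget becomes 54,
 * and rx_work_limit becomes 22. Since the limit was not exhausted, the
 * poll completes: netif_rx_complete() runs, RHALT is cleared, the
 * interrupt mask is restored, and 0 is returned. Only when the limit
 * goes negative does gfar_poll() return 1, asking to be polled again.
 */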
/* The interrupt handler for devices with one interrupt */ +static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct gfar_private *priv = netdev_priv(dev); + + /* Save ievent for future reference */ + u32 events = gfar_read(&priv->regs->ievent); + + /* Clear IEVENT */ + gfar_write(&priv->regs->ievent, events); + + /* Check for reception */ + if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0)) + gfar_receive(irq, dev_id, regs); + + /* Check for transmit completion */ + if ((events & IEVENT_TXF) || (events & IEVENT_TXB)) + gfar_transmit(irq, dev_id, regs); + + /* Update error statistics */ + if (events & IEVENT_TXE) { + priv->stats.tx_errors++; + + if (events & IEVENT_LC) + priv->stats.tx_window_errors++; + if (events & IEVENT_CRL) + priv->stats.tx_aborted_errors++; + if (events & IEVENT_XFUN) { +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_WARNING "%s: tx underrun. dropped packet\n", + dev->name); +#endif + priv->stats.tx_dropped++; + priv->extra_stats.tx_underrun++; + + /* Reactivate the Tx Queues */ + gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); + } + } + if (events & IEVENT_BSY) { + priv->stats.rx_errors++; + priv->extra_stats.rx_bsy++; + + gfar_receive(irq, dev_id, regs); + +#ifndef CONFIG_GFAR_NAPI + /* Clear the halt bit in RSTAT */ + gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); +#endif + +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name, + gfar_read(&priv->regs->rstat)); +#endif + } + if (events & IEVENT_BABR) { + priv->stats.rx_errors++; + priv->extra_stats.rx_babr++; + +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: babbling error\n", dev->name); +#endif + } + if (events & IEVENT_EBERR) { + priv->extra_stats.eberr++; +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: EBERR\n", dev->name); +#endif + } + if (events & IEVENT_RXC) { +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: control frame\n", dev->name); +#endif + } + + if (events & IEVENT_BABT) { + priv->extra_stats.tx_babt++; +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: babt error\n", dev->name); +#endif + } + + return IRQ_HANDLED; +} + +static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct gfar_private *priv = netdev_priv(dev); + + /* Run the commands which acknowledge the interrupt */ + phy_run_commands(dev, priv->phyinfo->ack_int); + + /* Schedule the bottom half */ + schedule_work(&priv->tq); + + return IRQ_HANDLED; +} + +/* Scheduled by the phy_interrupt/timer to handle PHY changes */ +static void gfar_phy_change(void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct gfar_private *priv = netdev_priv(dev); + int timeout = HZ / 1000 + 1; + + /* Delay to give the PHY a chance to change the + * register state */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(timeout); + + /* Run the commands which check the link state */ + phy_run_commands(dev, priv->phyinfo->handle_int); + + /* React to the change in state */ + adjust_link(dev); +} + +/* Called every so often on systems that don't interrupt + * the core for PHY changes */ +static void gfar_phy_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct gfar_private *priv = netdev_priv(dev); + + schedule_work(&priv->tq); + + mod_timer(&priv->phy_info_timer, jiffies + 2 * HZ); +} +
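/*
 * Summary of the deferred PHY handling wired up here and in
 * startup_gfar() (editorial note, drawn from the code above):
 *
 *	interruptPHY -> phy_interrupt()       ack via phyinfo->ack_int
 *	             -> schedule_work(&priv->tq)
 *	             -> gfar_phy_change()     short sleep, handle_int
 *	             -> adjust_link()         program MACCFG2, set carrier
 *
 * Boards without a PHY interrupt instead run gfar_phy_timer() every
 * 2*HZ, which schedules the same work item.
 */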
/* Called every time the controller might need to be made + * aware of new link state. The PHY code conveys this + * information through variables in the priv structure, and this + * function converts those variables into the appropriate + * register values, and can bring down the device if needed. + */ +static void adjust_link(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar *regs = priv->regs; + u32 tempval; + + if (priv->link) { + /* Now we make sure that we can be in full duplex mode. + * If not, we operate in half-duplex mode. */ + if (priv->duplexity != priv->olddplx) { + if (!(priv->duplexity)) { + tempval = gfar_read(&regs->maccfg2); + tempval &= ~(MACCFG2_FULL_DUPLEX); + gfar_write(&regs->maccfg2, tempval); + + printk(KERN_INFO "%s: Half Duplex\n", + dev->name); + } else { + tempval = gfar_read(&regs->maccfg2); + tempval |= MACCFG2_FULL_DUPLEX; + gfar_write(&regs->maccfg2, tempval); + + printk(KERN_INFO "%s: Full Duplex\n", + dev->name); + } + + priv->olddplx = priv->duplexity; + } + + if (priv->speed != priv->oldspeed) { + switch (priv->speed) { + case 1000: + tempval = gfar_read(&regs->maccfg2); + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); + gfar_write(&regs->maccfg2, tempval); + break; + case 100: + case 10: + tempval = gfar_read(&regs->maccfg2); + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); + gfar_write(&regs->maccfg2, tempval); + break; + default: + printk(KERN_WARNING + "%s: Ack! Speed (%d) is not 10/100/1000!\n", + dev->name, priv->speed); + break; + } + + printk(KERN_INFO "%s: Speed %dBT\n", dev->name, + priv->speed); + + priv->oldspeed = priv->speed; + } + + if (!priv->oldlink) { + printk(KERN_INFO "%s: Link is up\n", dev->name); + priv->oldlink = 1; + netif_carrier_on(dev); + netif_schedule(dev); + } + } else { + if (priv->oldlink) { + printk(KERN_INFO "%s: Link is down\n", dev->name); + priv->oldlink = 0; + priv->oldspeed = 0; + priv->olddplx = -1; + netif_carrier_off(dev); + } + } +} + +
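/*
 * Worked example (editorial; constants from gianfar.h below): the
 * MACCFG2_IF field occupies bits 0x300 of MACCFG2, so retargeting an
 * up link is a read-modify-write of just that field:
 *
 *	tempval = (gfar_read(&regs->maccfg2) & ~MACCFG2_IF) | MACCFG2_GMII;
 *
 * leaving 0x200 in the field for GMII (1000 Mbit) and 0x100 for MII
 * (10/100), matching the switch statement above.
 */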
/* Update the hash table based on the current list of multicast + * addresses we subscribe to. Also, change the promiscuity of + * the device based on the flags (this function is called + * whenever dev->flags is changed) */ +static void gfar_set_multi(struct net_device *dev) +{ + struct dev_mc_list *mc_ptr; + struct gfar_private *priv = netdev_priv(dev); + struct gfar *regs = priv->regs; + u32 tempval; + + if (dev->flags & IFF_PROMISC) { + printk(KERN_INFO "%s: Entering promiscuous mode.\n", + dev->name); + /* Set RCTRL to PROM */ + tempval = gfar_read(&regs->rctrl); + tempval |= RCTRL_PROM; + gfar_write(&regs->rctrl, tempval); + } else { + /* Set RCTRL to not PROM */ + tempval = gfar_read(&regs->rctrl); + tempval &= ~(RCTRL_PROM); + gfar_write(&regs->rctrl, tempval); + } + + if (dev->flags & IFF_ALLMULTI) { + /* Set the hash to rx all multicast frames */ + gfar_write(&regs->gaddr0, 0xffffffff); + gfar_write(&regs->gaddr1, 0xffffffff); + gfar_write(&regs->gaddr2, 0xffffffff); + gfar_write(&regs->gaddr3, 0xffffffff); + gfar_write(&regs->gaddr4, 0xffffffff); + gfar_write(&regs->gaddr5, 0xffffffff); + gfar_write(&regs->gaddr6, 0xffffffff); + gfar_write(&regs->gaddr7, 0xffffffff); + } else { + /* zero out the hash */ + gfar_write(&regs->gaddr0, 0x0); + gfar_write(&regs->gaddr1, 0x0); + gfar_write(&regs->gaddr2, 0x0); + gfar_write(&regs->gaddr3, 0x0); + gfar_write(&regs->gaddr4, 0x0); + gfar_write(&regs->gaddr5, 0x0); + gfar_write(&regs->gaddr6, 0x0); + gfar_write(&regs->gaddr7, 0x0); + + if (dev->mc_count == 0) + return; + + /* Parse the list, and set the appropriate bits */ + for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { + gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr); + } + } + + return; +} + +/* Set the appropriate hash bit for the given addr */ +/* The algorithm works like so: + * 1) Take the Destination Address (ie the multicast address), and + * do a CRC on it (little endian), and reverse the bits of the + * result. + * 2) Use the 8 most significant bits as a hash into a 256-entry + * table. The table is controlled through 8 32-bit registers: + * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is + * entry 255. This means that the 3 most significant bits of the + * hash select which gaddr register to use, and the 5 other bits + * indicate which bit (assuming an IBM numbering scheme, which + * for PowerPC (tm) is usually the case) in the register holds + * the entry. */ +static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) +{ + u32 tempval; + struct gfar_private *priv = netdev_priv(dev); + struct gfar *regs = priv->regs; + u32 *hash = &regs->gaddr0; + u32 result = ether_crc(MAC_ADDR_LEN, addr); + u8 whichreg = ((result >> 29) & 0x7); + u8 whichbit = ((result >> 24) & 0x1f); + u32 value = (1 << (31-whichbit)); + + tempval = gfar_read(&hash[whichreg]); + tempval |= value; + gfar_write(&hash[whichreg], tempval); + + return; +} +
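/*
 * Worked example of the hashing above (editorial; the CRC value is
 * made up for illustration): suppose ether_crc() of some multicast
 * address comes out to 0xb4c00000. Then
 *
 *	whichreg = (0xb4c00000 >> 29) & 0x7  = 5
 *	whichbit = (0xb4c00000 >> 24) & 0x1f = 0x14 (20)
 *	value    = 1 << (31 - 20)            = 0x00000800
 *
 * so bit 20 (IBM numbering) of gaddr5 is set, i.e. table entry
 * 5 * 32 + 20 = 180 of the 256-entry filter.
 */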
/* GFAR error interrupt handler */ +static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct gfar_private *priv = netdev_priv(dev); + + /* Save ievent for future reference */ + u32 events = gfar_read(&priv->regs->ievent); + + /* Clear IEVENT */ + gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK); + + /* Report the error, if error reporting is compiled in */ +#if defined (BRIEF_GFAR_ERRORS) || defined (VERBOSE_GFAR_ERRORS) + printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", + dev->name, events, gfar_read(&priv->regs->imask)); +#endif + + /* Update the error counters */ + if (events & IEVENT_TXE) { + priv->stats.tx_errors++; + + if (events & IEVENT_LC) + priv->stats.tx_window_errors++; + if (events & IEVENT_CRL) + priv->stats.tx_aborted_errors++; + if (events & IEVENT_XFUN) { +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: underrun. packet dropped.\n", + dev->name); +#endif + priv->stats.tx_dropped++; + priv->extra_stats.tx_underrun++; + + /* Reactivate the Tx Queues */ + gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); + } +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); +#endif + } + if (events & IEVENT_BSY) { + priv->stats.rx_errors++; + priv->extra_stats.rx_bsy++; + + gfar_receive(irq, dev_id, regs); + +#ifndef CONFIG_GFAR_NAPI + /* Clear the halt bit in RSTAT */ + gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); +#endif + +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name, + gfar_read(&priv->regs->rstat)); +#endif + } + if (events & IEVENT_BABR) { + priv->stats.rx_errors++; + priv->extra_stats.rx_babr++; + +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: babbling error\n", dev->name); +#endif + } + if (events & IEVENT_EBERR) { + priv->extra_stats.eberr++; +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: EBERR\n", dev->name); +#endif + } + if (events & IEVENT_RXC) { +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: control frame\n", dev->name); +#endif + } + + if (events & IEVENT_BABT) { + priv->extra_stats.tx_babt++; +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: babt error\n", dev->name); +#endif + } + return IRQ_HANDLED; +} + +/* Structure for a device driver */ +static struct ocp_device_id gfar_ids[] = { + {.vendor = OCP_ANY_ID,.function = OCP_FUNC_GFAR}, + {.vendor = OCP_VENDOR_INVALID} +}; + +static struct ocp_driver gfar_driver = { + .name = "gianfar", + .id_table = gfar_ids, + + .probe = gfar_probe, + .remove = gfar_remove, +}; + +static int __init gfar_init(void) +{ + int rc; + + rc = ocp_register_driver(&gfar_driver); +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41) + if (rc != 0) { +#else + if (rc == 0) { +#endif + ocp_unregister_driver(&gfar_driver); + return -ENODEV; + } + + return 0; +} + +static void __exit gfar_exit(void) +{ + ocp_unregister_driver(&gfar_driver); +} + +module_init(gfar_init); +module_exit(gfar_exit); diff -Nru a/drivers/net/gianfar.h b/drivers/net/gianfar.h --- /dev/null Wed Dec 31 16:00:00 196900 +++ b/drivers/net/gianfar.h 2004-07-13 12:46:19 -07:00 @@ -0,0 +1,537 @@ +/* + * drivers/net/gianfar.h + * + * Gianfar Ethernet Driver + * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 + * Based on 8260_io/fcc_enet.c + * + * Author: Andy Fleming + * Maintainer: Kumar Gala (kumar.gala@freescale.com) + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * Still left to do: + * -Add support for module parameters + */ +#ifndef __GIANFAR_H +#define __GIANFAR_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41) +#include +#else +#include +#define work_struct tq_struct +#define schedule_work schedule_task +#endif + +#include +#include +#include +#include "gianfar_phy.h" + +/* The maximum number of packets to be handled in one call of gfar_poll */ +#define GFAR_DEV_WEIGHT 64 + +/* Number of bytes to align the rx bufs to */ +#define RXBUF_ALIGNMENT 64 + +/* The number of bytes which composes a unit for the purpose of + * allocating data buffers. ie-for any given MTU, the data buffer + * will be the next highest multiple of 512 bytes. */ +#define INCREMENTAL_BUFFER_SIZE 512 + + +#define MAC_ADDR_LEN 6 + +extern char gfar_driver_name[]; +extern char gfar_driver_version[]; + +/* These need to be powers of 2 for this driver */ +#ifdef CONFIG_GFAR_NAPI +#define DEFAULT_TX_RING_SIZE 256 +#define DEFAULT_RX_RING_SIZE 256 +#else +#define DEFAULT_TX_RING_SIZE 64 +#define DEFAULT_RX_RING_SIZE 64 +#endif + +#define GFAR_RX_MAX_RING_SIZE 256 +#define GFAR_TX_MAX_RING_SIZE 256 + +#define DEFAULT_RX_BUFFER_SIZE 1536 +#define TX_RING_MOD_MASK(size) (size-1) +#define RX_RING_MOD_MASK(size) (size-1) +#define JUMBO_BUFFER_SIZE 9728 +#define JUMBO_FRAME_SIZE 9600 + +/* Latency of interface clock in nanoseconds */ +/* Interface clock latency , in this case, means the + * time described by a value of 1 in the interrupt + * coalescing registers' time fields. Since those fields + * refer to the time it takes for 64 clocks to pass, the + * latencies are as such: + * GBIT = 125MHz => 8ns/clock => 8*64 ns / tick + * 100 = 25 MHz => 40ns/clock => 40*64 ns / tick + * 10 = 2.5 MHz => 400ns/clock => 400*64 ns / tick + */ +#define GFAR_GBIT_TIME 512 +#define GFAR_100_TIME 2560 +#define GFAR_10_TIME 25600 + +#define DEFAULT_TXCOUNT 16 +#define DEFAULT_TXTIME 32768 + +#define DEFAULT_RXCOUNT 16 +#define DEFAULT_RXTIME 32768 + +#define TBIPA_VALUE 0x1f +#define MIIMCFG_INIT_VALUE 0x00000007 +#define MIIMCFG_RESET 0x80000000 +#define MIIMIND_BUSY 0x00000001 + +/* MAC register bits */ +#define MACCFG1_SOFT_RESET 0x80000000 +#define MACCFG1_RESET_RX_MC 0x00080000 +#define MACCFG1_RESET_TX_MC 0x00040000 +#define MACCFG1_RESET_RX_FUN 0x00020000 +#define MACCFG1_RESET_TX_FUN 0x00010000 +#define MACCFG1_LOOPBACK 0x00000100 +#define MACCFG1_RX_FLOW 0x00000020 +#define MACCFG1_TX_FLOW 0x00000010 +#define MACCFG1_SYNCD_RX_EN 0x00000008 +#define MACCFG1_RX_EN 0x00000004 +#define MACCFG1_SYNCD_TX_EN 0x00000002 +#define MACCFG1_TX_EN 0x00000001 + +#define MACCFG2_INIT_SETTINGS 0x00007205 +#define MACCFG2_FULL_DUPLEX 0x00000001 +#define MACCFG2_IF 0x00000300 +#define MACCFG2_MII 0x00000100 +#define MACCFG2_GMII 0x00000200 +#define MACCFG2_HUGEFRAME 0x00000020 +#define MACCFG2_LENGTHCHECK 0x00000010 + +#define ECNTRL_INIT_SETTINGS 0x00001000 +#define ECNTRL_TBI_MODE 0x00000020 + +#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE + +#define MINFLR_INIT_SETTINGS 0x00000040 + +/* Init to do tx snooping for buffers and descriptors */ +#define DMACTRL_INIT_SETTINGS 0x000000c3 +#define DMACTRL_GRS 0x00000010 +#define DMACTRL_GTS 0x00000008 + +#define TSTAT_CLEAR_THALT 0x80000000 + +/* Interrupt coalescing macros */ +#define IC_ICEN 0x80000000 +#define IC_ICFT_MASK 0x1fe00000 +#define 
IC_ICFT_SHIFT 21 +#define mk_ic_icft(x) \ + (((unsigned int)x << IC_ICFT_SHIFT)&IC_ICFT_MASK) +#define IC_ICTT_MASK 0x0000ffff +#define mk_ic_ictt(x) (x&IC_ICTT_MASK) + +#define mk_ic_value(count, time) (IC_ICEN | \ + mk_ic_icft(count) | \ + mk_ic_ictt(time)) + +#define RCTRL_PROM 0x00000008 +#define RSTAT_CLEAR_RHALT 0x00800000 + +#define IEVENT_INIT_CLEAR 0xffffffff +#define IEVENT_BABR 0x80000000 +#define IEVENT_RXC 0x40000000 +#define IEVENT_BSY 0x20000000 +#define IEVENT_EBERR 0x10000000 +#define IEVENT_MSRO 0x04000000 +#define IEVENT_GTSC 0x02000000 +#define IEVENT_BABT 0x01000000 +#define IEVENT_TXC 0x00800000 +#define IEVENT_TXE 0x00400000 +#define IEVENT_TXB 0x00200000 +#define IEVENT_TXF 0x00100000 +#define IEVENT_LC 0x00040000 +#define IEVENT_CRL 0x00020000 +#define IEVENT_XFUN 0x00010000 +#define IEVENT_RXB0 0x00008000 +#define IEVENT_GRSC 0x00000100 +#define IEVENT_RXF0 0x00000080 +#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0) +#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF) +#define IEVENT_ERR_MASK \ +(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \ + IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \ + | IEVENT_CRL | IEVENT_XFUN) + +#define IMASK_INIT_CLEAR 0x00000000 +#define IMASK_BABR 0x80000000 +#define IMASK_RXC 0x40000000 +#define IMASK_BSY 0x20000000 +#define IMASK_EBERR 0x10000000 +#define IMASK_MSRO 0x04000000 +#define IMASK_GRSC 0x02000000 +#define IMASK_BABT 0x01000000 +#define IMASK_TXC 0x00800000 +#define IMASK_TXEEN 0x00400000 +#define IMASK_TXBEN 0x00200000 +#define IMASK_TXFEN 0x00100000 +#define IMASK_LC 0x00040000 +#define IMASK_CRL 0x00020000 +#define IMASK_XFUN 0x00010000 +#define IMASK_RXB0 0x00008000 +#define IMASK_GTSC 0x00000100 +#define IMASK_RXFEN0 0x00000080 +#define IMASK_RX_DISABLED ~(IMASK_RXFEN0 | IMASK_BSY) +#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \ + IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \ + IMASK_XFUN | IMASK_RXC | IMASK_BABT) + + +/* Attribute fields */ + +/* This enables rx snooping for buffers and descriptors */ +#ifdef CONFIG_GFAR_BDSTASH +#define ATTR_BDSTASH 0x00000800 +#else +#define ATTR_BDSTASH 0x00000000 +#endif + +#ifdef CONFIG_GFAR_BUFSTASH +#define ATTR_BUFSTASH 0x00004000 +#define STASH_LENGTH 64 +#else +#define ATTR_BUFSTASH 0x00000000 +#endif + +#define ATTR_SNOOPING 0x000000c0 +#define ATTR_INIT_SETTINGS (ATTR_SNOOPING \ + | ATTR_BDSTASH | ATTR_BUFSTASH) + +#define ATTRELI_INIT_SETTINGS 0x0 + + +/* TxBD status field bits */ +#define TXBD_READY 0x8000 +#define TXBD_PADCRC 0x4000 +#define TXBD_WRAP 0x2000 +#define TXBD_INTERRUPT 0x1000 +#define TXBD_LAST 0x0800 +#define TXBD_CRC 0x0400 +#define TXBD_DEF 0x0200 +#define TXBD_HUGEFRAME 0x0080 +#define TXBD_LATECOLLISION 0x0080 +#define TXBD_RETRYLIMIT 0x0040 +#define TXBD_RETRYCOUNTMASK 0x003c +#define TXBD_UNDERRUN 0x0002 + +/* RxBD status field bits */ +#define RXBD_EMPTY 0x8000 +#define RXBD_RO1 0x4000 +#define RXBD_WRAP 0x2000 +#define RXBD_INTERRUPT 0x1000 +#define RXBD_LAST 0x0800 +#define RXBD_FIRST 0x0400 +#define RXBD_MISS 0x0100 +#define RXBD_BROADCAST 0x0080 +#define RXBD_MULTICAST 0x0040 +#define RXBD_LARGE 0x0020 +#define RXBD_NONOCTET 0x0010 +#define RXBD_SHORT 0x0008 +#define RXBD_CRCERR 0x0004 +#define RXBD_OVERRUN 0x0002 +#define RXBD_TRUNCATED 0x0001 +#define RXBD_STATS 0x01ff + +struct txbd8 +{ + u16 status; /* Status Fields */ + u16 length; /* Buffer length */ + u32 bufPtr; /* Buffer Pointer */ +}; + +struct rxbd8 +{ + u16 status; /* Status Fields */ + u16 length; /* Buffer Length */ + u32 bufPtr; /* 
Buffer Pointer */ +}; + +struct rmon_mib +{ + u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */ + u32 tr127; /* 0x.684 - Transmit and Receive 65-127 byte Frame Counter */ + u32 tr255; /* 0x.688 - Transmit and Receive 128-255 byte Frame Counter */ + u32 tr511; /* 0x.68c - Transmit and Receive 256-511 byte Frame Counter */ + u32 tr1k; /* 0x.690 - Transmit and Receive 512-1023 byte Frame Counter */ + u32 trmax; /* 0x.694 - Transmit and Receive 1024-1518 byte Frame Counter */ + u32 trmgv; /* 0x.698 - Transmit and Receive 1519-1522 byte Good VLAN Frame */ + u32 rbyt; /* 0x.69c - Receive Byte Counter */ + u32 rpkt; /* 0x.6a0 - Receive Packet Counter */ + u32 rfcs; /* 0x.6a4 - Receive FCS Error Counter */ + u32 rmca; /* 0x.6a8 - Receive Multicast Packet Counter */ + u32 rbca; /* 0x.6ac - Receive Broadcast Packet Counter */ + u32 rxcf; /* 0x.6b0 - Receive Control Frame Packet Counter */ + u32 rxpf; /* 0x.6b4 - Receive Pause Frame Packet Counter */ + u32 rxuo; /* 0x.6b8 - Receive Unknown OP Code Counter */ + u32 raln; /* 0x.6bc - Receive Alignment Error Counter */ + u32 rflr; /* 0x.6c0 - Receive Frame Length Error Counter */ + u32 rcde; /* 0x.6c4 - Receive Code Error Counter */ + u32 rcse; /* 0x.6c8 - Receive Carrier Sense Error Counter */ + u32 rund; /* 0x.6cc - Receive Undersize Packet Counter */ + u32 rovr; /* 0x.6d0 - Receive Oversize Packet Counter */ + u32 rfrg; /* 0x.6d4 - Receive Fragments Counter */ + u32 rjbr; /* 0x.6d8 - Receive Jabber Counter */ + u32 rdrp; /* 0x.6dc - Receive Drop Counter */ + u32 tbyt; /* 0x.6e0 - Transmit Byte Counter Counter */ + u32 tpkt; /* 0x.6e4 - Transmit Packet Counter */ + u32 tmca; /* 0x.6e8 - Transmit Multicast Packet Counter */ + u32 tbca; /* 0x.6ec - Transmit Broadcast Packet Counter */ + u32 txpf; /* 0x.6f0 - Transmit Pause Control Frame Counter */ + u32 tdfr; /* 0x.6f4 - Transmit Deferral Packet Counter */ + u32 tedf; /* 0x.6f8 - Transmit Excessive Deferral Packet Counter */ + u32 tscl; /* 0x.6fc - Transmit Single Collision Packet Counter */ + u32 tmcl; /* 0x.700 - Transmit Multiple Collision Packet Counter */ + u32 tlcl; /* 0x.704 - Transmit Late Collision Packet Counter */ + u32 txcl; /* 0x.708 - Transmit Excessive Collision Packet Counter */ + u32 tncl; /* 0x.70c - Transmit Total Collision Counter */ + u8 res1[4]; + u32 tdrp; /* 0x.714 - Transmit Drop Frame Counter */ + u32 tjbr; /* 0x.718 - Transmit Jabber Frame Counter */ + u32 tfcs; /* 0x.71c - Transmit FCS Error Counter */ + u32 txcf; /* 0x.720 - Transmit Control Frame Counter */ + u32 tovr; /* 0x.724 - Transmit Oversize Frame Counter */ + u32 tund; /* 0x.728 - Transmit Undersize Frame Counter */ + u32 tfrg; /* 0x.72c - Transmit Fragments Frame Counter */ + u32 car1; /* 0x.730 - Carry Register One */ + u32 car2; /* 0x.734 - Carry Register Two */ + u32 cam1; /* 0x.738 - Carry Mask Register One */ + u32 cam2; /* 0x.73c - Carry Mask Register Two */ +}; + +struct gfar_extra_stats { + u64 kernel_dropped; + u64 rx_large; + u64 rx_short; + u64 rx_nonoctet; + u64 rx_crcerr; + u64 rx_overrun; + u64 rx_bsy; + u64 rx_babr; + u64 rx_trunc; + u64 eberr; + u64 tx_babt; + u64 tx_underrun; + u64 rx_skbmissing; + u64 tx_timeout; +}; + +#define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32)) +#define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64)) + +/* Number of stats in the stats structure (ignore car and cam regs)*/ +#define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN) + +#define GFAR_INFOSTR_LEN 32 + +struct gfar_stats { + u64 
extra[GFAR_EXTRA_STATS_LEN]; + u64 rmon[GFAR_RMON_LEN]; +}; + + +struct gfar { + u8 res1[16]; + u32 ievent; /* 0x.010 - Interrupt Event Register */ + u32 imask; /* 0x.014 - Interrupt Mask Register */ + u32 edis; /* 0x.018 - Error Disabled Register */ + u8 res2[4]; + u32 ecntrl; /* 0x.020 - Ethernet Control Register */ + u32 minflr; /* 0x.024 - Minimum Frame Length Register */ + u32 ptv; /* 0x.028 - Pause Time Value Register */ + u32 dmactrl; /* 0x.02c - DMA Control Register */ + u32 tbipa; /* 0x.030 - TBI PHY Address Register */ + u8 res3[88]; + u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */ + u8 res4[8]; + u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */ + u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */ + u8 res5[96]; + u32 tctrl; /* 0x.100 - Transmit Control Register */ + u32 tstat; /* 0x.104 - Transmit Status Register */ + u8 res6[4]; + u32 tbdlen; /* 0x.10c - Transmit Buffer Descriptor Data Length Register */ + u32 txic; /* 0x.110 - Transmit Interrupt Coalescing Configuration Register */ + u8 res7[16]; + u32 ctbptr; /* 0x.124 - Current Transmit Buffer Descriptor Pointer Register */ + u8 res8[92]; + u32 tbptr; /* 0x.184 - Transmit Buffer Descriptor Pointer Low Register */ + u8 res9[124]; + u32 tbase; /* 0x.204 - Transmit Descriptor Base Address Register */ + u8 res10[168]; + u32 ostbd; /* 0x.2b0 - Out-of-Sequence Transmit Buffer Descriptor Register */ + u32 ostbdp; /* 0x.2b4 - Out-of-Sequence Transmit Data Buffer Pointer Register */ + u8 res11[72]; + u32 rctrl; /* 0x.300 - Receive Control Register */ + u32 rstat; /* 0x.304 - Receive Status Register */ + u8 res12[4]; + u32 rbdlen; /* 0x.30c - RxBD Data Length Register */ + u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */ + u8 res13[16]; + u32 crbptr; /* 0x.324 - Current Receive Buffer Descriptor Pointer */ + u8 res14[24]; + u32 mrblr; /* 0x.340 - Maximum Receive Buffer Length Register */ + u8 res15[64]; + u32 rbptr; /* 0x.384 - Receive Buffer Descriptor Pointer */ + u8 res16[124]; + u32 rbase; /* 0x.404 - Receive Descriptor Base Address */ + u8 res17[248]; + u32 maccfg1; /* 0x.500 - MAC Configuration 1 Register */ + u32 maccfg2; /* 0x.504 - MAC Configuration 2 Register */ + u32 ipgifg; /* 0x.508 - Inter Packet Gap/Inter Frame Gap Register */ + u32 hafdup; /* 0x.50c - Half Duplex Register */ + u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */ + u8 res18[12]; + u32 miimcfg; /* 0x.520 - MII Management Configuration Register */ + u32 miimcom; /* 0x.524 - MII Management Command Register */ + u32 miimadd; /* 0x.528 - MII Management Address Register */ + u32 miimcon; /* 0x.52c - MII Management Control Register */ + u32 miimstat; /* 0x.530 - MII Management Status Register */ + u32 miimind; /* 0x.534 - MII Management Indicator Register */ + u8 res19[4]; + u32 ifstat; /* 0x.53c - Interface Status Register */ + u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */ + u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */ + u8 res20[312]; + struct rmon_mib rmon; + u8 res21[192]; + u32 iaddr0; /* 0x.800 - Individual address register 0 */ + u32 iaddr1; /* 0x.804 - Individual address register 1 */ + u32 iaddr2; /* 0x.808 - Individual address register 2 */ + u32 iaddr3; /* 0x.80c - Individual address register 3 */ + u32 iaddr4; /* 0x.810 - Individual address register 4 */ + u32 iaddr5; /* 0x.814 - Individual address register 5 */ + u32 iaddr6; /* 0x.818 - Individual address register 6 */ + u32 iaddr7; /* 0x.81c - Individual address register 7 */ + u8 res22[96]; + u32 gaddr0; /* 0x.880 - Global address register 0 */ + u32 gaddr1; /* 0x.884 - Global address register 1 */ + u32 gaddr2; /* 0x.888 - Global address register 2 */ + u32 gaddr3; /* 0x.88c - Global address register 3 */ + u32 gaddr4; /* 0x.890 - Global address register 4 */ + u32 gaddr5; /* 0x.894 - Global address register 5 */ + u32 gaddr6; /* 0x.898 - Global address register 6 */ + u32 gaddr7; /* 0x.89c - Global address register 7 */ + u8 res23[856]; + u32 attr; /* 0x.bf8 - Attributes Register */ + u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */ + u8 res24[1024]; + +}; +
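/*
 * Editorial sketch: the res*[] padding above is what pins each field to
 * the offset named in its comment. A build-time cross-check (not in the
 * original patch; BUILD_BUG_ON must sit inside a function, e.g. the
 * probe routine) could look like:
 *
 *	BUILD_BUG_ON(offsetof(struct gfar, ievent)  != 0x010);
 *	BUILD_BUG_ON(offsetof(struct gfar, maccfg1) != 0x500);
 *	BUILD_BUG_ON(offsetof(struct gfar, attreli) != 0xbfc);
 */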
/* Struct stolen almost completely (and shamelessly) from the FCC enet source + * (Ok, that's not so true anymore, but there is a family resemblance) + * The GFAR buffer descriptors track the ring buffers. The rx_bd_base + * and tx_bd_base always point to the currently available buffer. + * The dirty_tx tracks the current buffer that is being sent by the + * controller. The cur_tx and dirty_tx are equal under both completely + * empty and completely full conditions. The empty/ready indicator in + * the buffer descriptor determines the actual condition. + */ +struct gfar_private +{ + /* pointers to arrays of skbuffs for tx and rx */ + struct sk_buff ** tx_skbuff; + struct sk_buff ** rx_skbuff; + + /* indices pointing to the next free skb in the skb arrays */ + u16 skb_curtx; + u16 skb_currx; + + /* index of the first skb which hasn't been transmitted + * yet. */ + u16 skb_dirtytx; + + /* Configuration info for the coalescing features */ + unsigned char txcoalescing; + unsigned short txcount; + unsigned short txtime; + unsigned char rxcoalescing; + unsigned short rxcount; + unsigned short rxtime; + + /* GFAR addresses */ + struct rxbd8 *rx_bd_base; /* Base addresses of Rx and Tx Buffers */ + struct txbd8 *tx_bd_base; + struct rxbd8 *cur_rx; /* Next free rx ring entry */ + struct txbd8 *cur_tx; /* Next free ring entry */ + struct txbd8 *dirty_tx; /* The Ring entry to be freed. */
 + struct gfar *regs; /* Pointer to the GFAR memory mapped Registers */ + struct phy_info *phyinfo; + struct gfar *phyregs; + struct work_struct tq; + struct timer_list phy_info_timer; + struct net_device_stats stats; /* linux network statistics */ + struct gfar_extra_stats extra_stats; + spinlock_t lock; + unsigned int rx_buffer_size; + unsigned int rx_stash_size; + unsigned int tx_ring_size; + unsigned int rx_ring_size; + wait_queue_head_t rxcleanupq; + unsigned int rxclean; + int link; /* current link state */ + int oldlink; + int duplexity; /* Indicates negotiated duplex state */ + int olddplx; + int speed; /* Indicates negotiated speed */ + int oldspeed; + + /* Info structure initialized by board setup code */ + struct ocp_gfar_data *einfo; +}; + +static inline u32 gfar_read(volatile unsigned *addr) +{ + u32 val; + val = in_be32(addr); + return val; +} + +static inline void gfar_write(volatile unsigned *addr, u32 val) +{ + out_be32(addr, val); +} + + + +#endif /* __GIANFAR_H */ diff -Nru a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c --- /dev/null Wed Dec 31 16:00:00 196900 +++ b/drivers/net/gianfar_ethtool.c 2004-07-13 12:46:19 -07:00 @@ -0,0 +1,484 @@ +/* + * drivers/net/gianfar_ethtool.c + * + * Gianfar Ethernet Driver + * Ethtool support for Gianfar Enet + * Based on e1000 ethtool support + * + * Author: Andy Fleming + * Maintainer: Kumar Gala (kumar.gala@freescale.com) + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This software may be used and distributed according to + * the terms of the GNU Public License, Version 2, incorporated herein + * by reference. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gianfar.h" + +#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0)) + +extern int startup_gfar(struct net_device *dev); +extern void stop_gfar(struct net_device *dev); +extern irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs); + +void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, + u64 * buf); +void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf); +int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); +int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); +void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals); +int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals); +void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo); + +static char stat_gstrings[][ETH_GSTRING_LEN] = { + "RX Dropped by Kernel", + "RX Large Frame Errors", + "RX Short Frame Errors", + "RX Non-Octet Errors", + "RX CRC Errors", + "RX Overrun Errors", + "RX Busy Errors", + "RX Babbling Errors", + "RX Truncated Frames", + "Ethernet Bus Error", + "TX Babbling Errors", + "TX Underrun Errors", + "RX SKB Missing Errors", + "TX Timeout Errors", + "tx&rx 64B frames", + "tx&rx 65-127B frames", + "tx&rx 128-255B frames", + "tx&rx 256-511B frames", + "tx&rx 512-1023B frames", + "tx&rx 1024-1518B frames", + "tx&rx 1519-1522B Good VLAN", + "RX bytes", + "RX Packets", + "RX FCS Errors", + "Receive Multicast Packet", + "Receive Broadcast Packet", + "RX Control Frame Packets", + "RX Pause Frame Packets", + "RX Unknown OP Code", + "RX Alignment Error", + "RX Frame Length Error", + "RX Code Error", + "RX Carrier Sense Error", + 
"RX Undersize Packets", + "RX Oversize Packets", + "RX Fragmented Frames", + "RX Jabber Frames", + "RX Dropped Frames", + "TX Byte Counter", + "TX Packets", + "TX Multicast Packets", + "TX Broadcast Packets", + "TX Pause Control Frames", + "TX Deferral Packets", + "TX Excessive Deferral Packets", + "TX Single Collision Packets", + "TX Multiple Collision Packets", + "TX Late Collision Packets", + "TX Excessive Collision Packets", + "TX Total Collision", + "RESERVED", + "TX Dropped Frames", + "TX Jabber Frames", + "TX FCS Errors", + "TX Control Frames", + "TX Oversize Frames", + "TX Undersize Frames", + "TX Fragmented Frames", +}; + +/* Fill in an array of 64-bit statistics from various sources. + * This array will be appended to the end of the ethtool_stats + * structure, and returned to user space + */ +void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf) +{ + int i; + struct gfar_private *priv = (struct gfar_private *) dev->priv; + u32 *rmon = (u32 *) & priv->regs->rmon; + u64 *extra = (u64 *) & priv->extra_stats; + struct gfar_stats *stats = (struct gfar_stats *) buf; + + for (i = 0; i < GFAR_RMON_LEN; i++) { + stats->rmon[i] = (u64) (rmon[i]); + } + + for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) { + stats->extra[i] = extra[i]; + } +} + +/* Returns the number of stats (and their corresponding strings) */ +int gfar_stats_count(struct net_device *dev) +{ + return GFAR_STATS_LEN; +} + +void gfar_gstrings_normon(struct net_device *dev, u32 stringset, u8 * buf) +{ + memcpy(buf, stat_gstrings, GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN); +} + +void gfar_fill_stats_normon(struct net_device *dev, + struct ethtool_stats *dummy, u64 * buf) +{ + int i; + struct gfar_private *priv = (struct gfar_private *) dev->priv; + u64 *extra = (u64 *) & priv->extra_stats; + + for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) { + buf[i] = extra[i]; + } +} + + +int gfar_stats_count_normon(struct net_device *dev) +{ + return GFAR_EXTRA_STATS_LEN; +} +/* Fills in the drvinfo structure with some basic info */ +void gfar_gdrvinfo(struct net_device *dev, struct + ethtool_drvinfo *drvinfo) +{ + strncpy(drvinfo->driver, gfar_driver_name, GFAR_INFOSTR_LEN); + strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN); + strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN); + strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN); + drvinfo->n_stats = GFAR_STATS_LEN; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +/* Return the current settings in the ethtool_cmd structure */ +int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + uint gigabit_support = + priv->einfo->flags & GFAR_HAS_GIGABIT ? SUPPORTED_1000baseT_Full : 0; + uint gigabit_advert = + priv->einfo->flags & GFAR_HAS_GIGABIT ? 
ADVERTISED_1000baseT_Full: 0; + + cmd->supported = (SUPPORTED_10baseT_Half + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | gigabit_support | SUPPORTED_Autoneg); + + /* For now, we always advertise everything */ + cmd->advertising = (ADVERTISED_10baseT_Half + | ADVERTISED_100baseT_Half + | ADVERTISED_100baseT_Full + | gigabit_advert | ADVERTISED_Autoneg); + + cmd->speed = priv->speed; + cmd->duplex = priv->duplexity; + cmd->port = PORT_MII; + cmd->phy_address = priv->einfo->phyid; + cmd->transceiver = XCVR_EXTERNAL; + cmd->autoneg = AUTONEG_ENABLE; + cmd->maxtxpkt = priv->txcount; + cmd->maxrxpkt = priv->rxcount; + + return 0; +} + +/* Return the length of the register structure */ +int gfar_reglen(struct net_device *dev) +{ + return sizeof (struct gfar); +} + +/* Return a dump of the GFAR register space */ +void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) +{ + int i; + struct gfar_private *priv = (struct gfar_private *) dev->priv; + u32 *theregs = (u32 *) priv->regs; + u32 *buf = (u32 *) regbuf; + + for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++) + buf[i] = theregs[i]; +} + +/* Return the link state 1 is up, 0 is down */ +u32 gfar_get_link(struct net_device *dev) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + return (u32) priv->link; +} + +/* Fill in a buffer with the strings which correspond to the + * stats */ +void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf) +{ + memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN); +} + +/* Convert microseconds to ethernet clock ticks, which changes + * depending on what speed the controller is running at */ +static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs) +{ + unsigned int count; + + /* The timer is different, depending on the interface speed */ + switch (priv->speed) { + case 1000: + count = GFAR_GBIT_TIME; + break; + case 100: + count = GFAR_100_TIME; + break; + case 10: + default: + count = GFAR_10_TIME; + break; + } + + /* Make sure we return a number greater than 0 + * if usecs > 0 */ + return ((usecs * 1000 + count - 1) / count); +} + +/* Convert ethernet clock ticks to microseconds */ +static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks) +{ + unsigned int count; + + /* The timer is different, depending on the interface speed */ + switch (priv->speed) { + case 1000: + count = GFAR_GBIT_TIME; + break; + case 100: + count = GFAR_100_TIME; + break; + case 10: + default: + count = GFAR_10_TIME; + break; + } + + /* Make sure we return a number greater than 0 */ + /* if ticks is > 0 */ + return ((ticks * count) / 1000); +} + +/* Get the coalescing parameters, and put them in the cvals + * structure. 
*/ +int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + + cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, priv->rxtime); + cvals->rx_max_coalesced_frames = priv->rxcount; + + cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, priv->txtime); + cvals->tx_max_coalesced_frames = priv->txcount; + + cvals->use_adaptive_rx_coalesce = 0; + cvals->use_adaptive_tx_coalesce = 0; + + cvals->pkt_rate_low = 0; + cvals->rx_coalesce_usecs_low = 0; + cvals->rx_max_coalesced_frames_low = 0; + cvals->tx_coalesce_usecs_low = 0; + cvals->tx_max_coalesced_frames_low = 0; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + cvals->pkt_rate_high = 0; + cvals->rx_coalesce_usecs_high = 0; + cvals->rx_max_coalesced_frames_high = 0; + cvals->tx_coalesce_usecs_high = 0; + cvals->tx_max_coalesced_frames_high = 0; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + cvals->rate_sample_interval = 0; + + return 0; +} + +/* Change the coalescing values. + * Both cvals->*_usecs and cvals->*_frames have to be > 0 + * in order for coalescing to be active + */ +int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + + /* Set up rx coalescing */ + if ((cvals->rx_coalesce_usecs == 0) || + (cvals->rx_max_coalesced_frames == 0)) + priv->rxcoalescing = 0; + else + priv->rxcoalescing = 1; + + priv->rxtime = gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs); + priv->rxcount = cvals->rx_max_coalesced_frames; + + /* Set up tx coalescing */ + if ((cvals->tx_coalesce_usecs == 0) || + (cvals->tx_max_coalesced_frames == 0)) + priv->txcoalescing = 0; + else + priv->txcoalescing = 1; + + priv->txtime = gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs); + priv->txcount = cvals->tx_max_coalesced_frames; + + if (priv->rxcoalescing) + gfar_write(&priv->regs->rxic, + mk_ic_value(priv->rxcount, priv->rxtime)); + else + gfar_write(&priv->regs->rxic, 0); + + if (priv->txcoalescing) + gfar_write(&priv->regs->txic, + mk_ic_value(priv->txcount, priv->txtime)); + else + gfar_write(&priv->regs->txic, 0); + + return 0; +} + +/* Fills in rvals with the current ring parameters. Currently, + * rx, rx_mini, and rx_jumbo rings are the same size, as mini and + * jumbo are ignored by the driver */ +void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + + rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE; + rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE; + rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE; + rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + rvals->rx_pending = priv->rx_ring_size; + rvals->rx_mini_pending = priv->rx_ring_size; + rvals->rx_jumbo_pending = priv->rx_ring_size; + rvals->tx_pending = priv->tx_ring_size; +} + +/* Change the current ring parameters, stopping the controller if + * necessary so that we don't mess things up while we're in + * motion. We wait for the ring to be clean before reallocating + * the rings. 
*/ +int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) +{ + u32 tempval; + struct gfar_private *priv = (struct gfar_private *) dev->priv; + int err = 0; + + if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) + return -EINVAL; + + if (!is_power_of_2(rvals->rx_pending)) { + printk("%s: Ring sizes must be a power of 2\n", + dev->name); + return -EINVAL; + } + + if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE) + return -EINVAL; + + if (!is_power_of_2(rvals->tx_pending)) { + printk("%s: Ring sizes must be a power of 2\n", + dev->name); + return -EINVAL; + } + + /* Stop the controller so we don't rx any more frames */ + /* But first, make sure we clear the bits */ + tempval = gfar_read(&priv->regs->dmactrl); + tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); + gfar_write(&priv->regs->dmactrl, tempval); + + tempval = gfar_read(&priv->regs->dmactrl); + tempval |= (DMACTRL_GRS | DMACTRL_GTS); + gfar_write(&priv->regs->dmactrl, tempval); + + while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC))) + cpu_relax(); + + /* Note that rx is not clean right now */ + priv->rxclean = 0; + + if (dev->flags & IFF_UP) { + /* Tell the driver to process the rest of the frames */ + gfar_receive(0, (void *) dev, NULL); + + /* Now wait for it to be done */ + wait_event_interruptible(priv->rxcleanupq, priv->rxclean); + + /* Ok, all packets have been handled. Now we bring it down, + * change the ring size, and bring it up */ + + stop_gfar(dev); + } + + priv->rx_ring_size = rvals->rx_pending; + priv->tx_ring_size = rvals->tx_pending; + + if (dev->flags & IFF_UP) + err = startup_gfar(dev); + + return err; +} + +struct ethtool_ops gfar_ethtool_ops = { + .get_settings = gfar_gsettings, + .get_drvinfo = gfar_gdrvinfo, + .get_regs_len = gfar_reglen, + .get_regs = gfar_get_regs, + .get_link = gfar_get_link, + .get_coalesce = gfar_gcoalesce, + .set_coalesce = gfar_scoalesce, + .get_ringparam = gfar_gringparam, + .set_ringparam = gfar_sringparam, + .get_strings = gfar_gstrings, + .get_stats_count = gfar_stats_count, + .get_ethtool_stats = gfar_fill_stats, +}; diff -Nru a/drivers/net/gianfar_phy.c b/drivers/net/gianfar_phy.c --- /dev/null Wed Dec 31 16:00:00 196900 +++ b/drivers/net/gianfar_phy.c 2004-07-13 12:46:19 -07:00 @@ -0,0 +1,504 @@ +/* + * drivers/net/gianfar_phy.c + * + * Gianfar Ethernet Driver -- PHY handling + * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 + * Based on 8260_io/fcc_enet.c + * + * Author: Andy Fleming + * Maintainer: Kumar Gala (kumar.gala@freescale.com) + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "gianfar.h" +#include "gianfar_phy.h" + +/* Write value to the PHY for this device to the register at regnum, */ +/* waiting until the write is done before it returns. 
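write_phy_reg() below packs the PHY address and the register number into the MIIM address register with (phyid << 8) | regnum. A sketch of that packing (not from the patch; the exact field widths come from the TSEC manual and are an assumption here):

#include <stdio.h>

static unsigned int miim_addr(unsigned int phyid, unsigned int regnum)
{
	return (phyid << 8) | regnum;	/* same expression the driver uses */
}

int main(void)
{
	printf("phy 0x03, reg 0x02 -> miimadd 0x%04x\n", miim_addr(0x03, 0x02));
	return 0;
}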
All PHY */ +/* configuration has to be done through the TSEC1 MIIM regs */ +void write_phy_reg(struct net_device *dev, u16 regnum, u16 value) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + struct gfar *regbase = priv->phyregs; + struct ocp_gfar_data *einfo = priv->einfo; + + /* Set the PHY address and the register address we want to write */ + gfar_write(®base->miimadd, ((einfo->phyid) << 8) | regnum); + + /* Write out the value we want */ + gfar_write(®base->miimcon, value); + + /* Wait for the transaction to finish */ + while (gfar_read(®base->miimind) & MIIMIND_BUSY) + cpu_relax(); +} + +/* Reads from register regnum in the PHY for device dev, */ +/* returning the value. Clears miimcom first. All PHY */ +/* configuration has to be done through the TSEC1 MIIM regs */ +u16 read_phy_reg(struct net_device *dev, u16 regnum) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + struct gfar *regbase = priv->phyregs; + struct ocp_gfar_data *einfo = priv->einfo; + u16 value; + + /* Set the PHY address and the register address we want to read */ + gfar_write(®base->miimadd, ((einfo->phyid) << 8) | regnum); + + /* Clear miimcom, and then initiate a read */ + gfar_write(®base->miimcom, 0); + gfar_write(®base->miimcom, MIIM_READ_COMMAND); + + /* Wait for the transaction to finish */ + while (gfar_read(®base->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY)) + cpu_relax(); + + /* Grab the value of the register from miimstat */ + value = gfar_read(®base->miimstat); + + return value; +} + +/* returns which value to write to the control register. */ +/* For 10/100 the value is slightly different. */ +u16 mii_cr_init(u16 mii_reg, struct net_device * dev) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + struct ocp_gfar_data *einfo = priv->einfo; + + if (einfo->flags & GFAR_HAS_GIGABIT) + return MIIM_CONTROL_INIT; + else + return MIIM_CR_INIT; +} + +#define BRIEF_GFAR_ERRORS +/* Wait for auto-negotiation to complete */ +u16 mii_parse_sr(u16 mii_reg, struct net_device * dev) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + + unsigned int timeout = GFAR_AN_TIMEOUT; + + if (mii_reg & MIIM_STATUS_LINK) + priv->link = 1; + else + priv->link = 0; + + /* Only auto-negotiate if the link has just gone up */ + if (priv->link && !priv->oldlink) { + while ((!(mii_reg & MIIM_STATUS_AN_DONE)) && timeout--) + mii_reg = read_phy_reg(dev, MIIM_STATUS); + +#if defined(BRIEF_GFAR_ERRORS) + if (mii_reg & MIIM_STATUS_AN_DONE) + printk(KERN_INFO "%s: Auto-negotiation done\n", + dev->name); + else + printk(KERN_INFO "%s: Auto-negotiation timed out\n", + dev->name); +#endif + } + + return 0; +} + +/* Determine the speed and duplex which was negotiated */ +u16 mii_parse_88E1011_psr(u16 mii_reg, struct net_device * dev) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + unsigned int speed; + + if (priv->link) { + if (mii_reg & MIIM_88E1011_PHYSTAT_DUPLEX) + priv->duplexity = 1; + else + priv->duplexity = 0; + + speed = (mii_reg & MIIM_88E1011_PHYSTAT_SPEED); + + switch (speed) { + case MIIM_88E1011_PHYSTAT_GBIT: + priv->speed = 1000; + break; + case MIIM_88E1011_PHYSTAT_100: + priv->speed = 100; + break; + default: + priv->speed = 10; + break; + } + } else { + priv->speed = 0; + priv->duplexity = 0; + } + + return 0; +} + +u16 mii_parse_cis8201(u16 mii_reg, struct net_device * dev) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + unsigned int speed; + + if (priv->link) { + if (mii_reg & 
MIIM_CIS8201_AUXCONSTAT_DUPLEX) + priv->duplexity = 1; + else + priv->duplexity = 0; + + speed = mii_reg & MIIM_CIS8201_AUXCONSTAT_SPEED; + + switch (speed) { + case MIIM_CIS8201_AUXCONSTAT_GBIT: + priv->speed = 1000; + break; + case MIIM_CIS8201_AUXCONSTAT_100: + priv->speed = 100; + break; + default: + priv->speed = 10; + break; + } + } else { + priv->speed = 0; + priv->duplexity = 0; + } + + return 0; +} + +u16 mii_parse_dm9161_scsr(u16 mii_reg, struct net_device * dev) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + + if (mii_reg & (MIIM_DM9161_SCSR_100F | MIIM_DM9161_SCSR_100H)) + priv->speed = 100; + else + priv->speed = 10; + + if (mii_reg & (MIIM_DM9161_SCSR_100F | MIIM_DM9161_SCSR_10F)) + priv->duplexity = 1; + else + priv->duplexity = 0; + + return 0; +} + +u16 dm9161_wait(u16 mii_reg, struct net_device *dev) +{ + int timeout = HZ; + int secondary = 10; + u16 temp; + + do { + + /* Davicom takes a bit to come up after a reset, + * so wait here for a bit */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(timeout); + + temp = read_phy_reg(dev, MIIM_STATUS); + + secondary--; + } while ((!(temp & MIIM_STATUS_AN_DONE)) && secondary); + + return 0; +} + +static struct phy_info phy_info_M88E1011S = { + 0x01410c6, + "Marvell 88E1011S", + 4, + (const struct phy_cmd[]) { /* config */ + /* Reset and configure the PHY */ + {MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* startup */ + /* Status is read once to clear old link state */ + {MIIM_STATUS, miim_read, NULL}, + /* Auto-negotiate */ + {MIIM_STATUS, miim_read, mii_parse_sr}, + /* Read the status */ + {MIIM_88E1011_PHY_STATUS, miim_read, mii_parse_88E1011_psr}, + /* Clear the IEVENT register */ + {MIIM_88E1011_IEVENT, miim_read, NULL}, + /* Set up the mask */ + {MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_INIT, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* ack_int */ + /* Clear the interrupt */ + {MIIM_88E1011_IEVENT, miim_read, NULL}, + /* Disable interrupts */ + {MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_CLEAR, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* handle_int */ + /* Read the Status (2x to make sure link is right) */ + {MIIM_STATUS, miim_read, NULL}, + /* Check the status */ + {MIIM_STATUS, miim_read, mii_parse_sr}, + {MIIM_88E1011_PHY_STATUS, miim_read, mii_parse_88E1011_psr}, + /* Enable Interrupts */ + {MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_INIT, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* shutdown */ + {MIIM_88E1011_IEVENT, miim_read, NULL}, + {MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_CLEAR, NULL}, + {miim_end,} + }, +}; + +/* Cicada 8204 */ +static struct phy_info phy_info_cis8204 = { + 0x3f11, + "Cicada Cis8204", + 6, + (const struct phy_cmd[]) { /* config */ + /* Override PHY config settings */ + {MIIM_CIS8201_AUX_CONSTAT, MIIM_CIS8201_AUXCONSTAT_INIT, NULL}, + /* Set up the interface mode */ + {MIIM_CIS8201_EXT_CON1, MIIM_CIS8201_EXTCON1_INIT, NULL}, + /* Configure some basic stuff */ + {MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* startup */ + /* Read the Status (2x to make sure link is right) */ + {MIIM_STATUS, miim_read, NULL}, + /* Auto-negotiate */ + {MIIM_STATUS, miim_read, mii_parse_sr}, + /* Read the status */ + {MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201}, + /* Clear the status register */ + {MIIM_CIS8204_ISTAT, miim_read, NULL}, + /* Enable interrupts */ + {MIIM_CIS8204_IMASK, MIIM_CIS8204_IMASK_MASK, NULL}, + {miim_end,} + }, + 
(const struct phy_cmd[]) { /* ack_int */ + /* Clear the status register */ + {MIIM_CIS8204_ISTAT, miim_read, NULL}, + /* Disable interrupts */ + {MIIM_CIS8204_IMASK, 0x0, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* handle_int */ + /* Read the Status (2x to make sure link is right) */ + {MIIM_STATUS, miim_read, NULL}, + /* Auto-negotiate */ + {MIIM_STATUS, miim_read, mii_parse_sr}, + /* Read the status */ + {MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201}, + /* Enable interrupts */ + {MIIM_CIS8204_IMASK, MIIM_CIS8204_IMASK_MASK, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* shutdown */ + /* Clear the status register */ + {MIIM_CIS8204_ISTAT, miim_read, NULL}, + /* Disable interrupts */ + {MIIM_CIS8204_IMASK, 0x0, NULL}, + {miim_end,} + }, +}; + +/* Cicada 8201 */ +static struct phy_info phy_info_cis8201 = { + 0xfc41, + "CIS8201", + 4, + (const struct phy_cmd[]) { /* config */ + /* Override PHY config settings */ + {MIIM_CIS8201_AUX_CONSTAT, MIIM_CIS8201_AUXCONSTAT_INIT, NULL}, + /* Set up the interface mode */ + {MIIM_CIS8201_EXT_CON1, MIIM_CIS8201_EXTCON1_INIT, NULL}, + /* Configure some basic stuff */ + {MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* startup */ + /* Read the Status (2x to make sure link is right) */ + {MIIM_STATUS, miim_read, NULL}, + /* Auto-negotiate */ + {MIIM_STATUS, miim_read, mii_parse_sr}, + /* Read the status */ + {MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* ack_int */ + {miim_end,} + }, + (const struct phy_cmd[]) { /* handle_int */ + {miim_end,} + }, + (const struct phy_cmd[]) { /* shutdown */ + {miim_end,} + }, +}; + +static struct phy_info phy_info_dm9161 = { + 0x0181b88, + "Davicom DM9161E", + 4, + (const struct phy_cmd[]) { /* config */ + {MIIM_CONTROL, MIIM_DM9161_CR_STOP, NULL}, + /* Do not bypass the scrambler/descrambler */ + {MIIM_DM9161_SCR, MIIM_DM9161_SCR_INIT, NULL}, + /* Clear 10BTCSR to default */ + {MIIM_DM9161_10BTCSR, MIIM_DM9161_10BTCSR_INIT, NULL}, + /* Configure some basic stuff */ + {MIIM_CONTROL, MIIM_CR_INIT, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* startup */ + /* Restart Auto Negotiation */ + {MIIM_CONTROL, MIIM_DM9161_CR_RSTAN, NULL}, + /* Status is read once to clear old link state */ + {MIIM_STATUS, miim_read, dm9161_wait}, + /* Auto-negotiate */ + {MIIM_STATUS, miim_read, mii_parse_sr}, + /* Read the status */ + {MIIM_DM9161_SCSR, miim_read, mii_parse_dm9161_scsr}, + /* Clear any pending interrupts */ + {MIIM_DM9161_INTR, miim_read, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* ack_int */ + {MIIM_DM9161_INTR, miim_read, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* handle_int */ + {MIIM_STATUS, miim_read, NULL}, + {MIIM_STATUS, miim_read, mii_parse_sr}, + {MIIM_DM9161_SCSR, miim_read, mii_parse_dm9161_scsr}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* shutdown */ + {MIIM_DM9161_INTR, miim_read, NULL}, + {miim_end,} + }, +}; + +static struct phy_info *phy_info[] = { + &phy_info_cis8201, + &phy_info_cis8204, + &phy_info_M88E1011S, + &phy_info_dm9161, + NULL +}; + +/* Use the PHY ID registers to determine what type of PHY is attached + * to device dev. 
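get_phy_info() below builds a 32-bit ID from the two 16-bit ID registers and matches it against the table after shifting away revision bits. A standalone sketch of the matching, reusing the id/shift values from the tables above (the raw register reads are invented for the example):

#include <stdio.h>
#include <stddef.h>

struct entry {
	unsigned int id;
	unsigned int shift;
	const char *name;
};

static const struct entry table[] = {
	{ 0x01410c6, 4, "Marvell 88E1011S" },
	{ 0x0181b88, 4, "Davicom DM9161E" },
};

int main(void)
{
	unsigned int phyir1 = 0x0141, phyir2 = 0x0c62;	/* assumed reads */
	unsigned int phy_id = (phyir1 << 16) | phyir2;
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].id == (phy_id >> table[i].shift))
			printf("matched %s\n", table[i].name);
	return 0;
}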
return a struct phy_info structure describing that PHY + */ +struct phy_info * get_phy_info(struct net_device *dev) +{ + u16 phy_reg; + u32 phy_ID; + int i; + struct phy_info *theInfo = NULL; + + /* Grab the bits from PHYIR1, and put them in the upper half */ + phy_reg = read_phy_reg(dev, MIIM_PHYIR1); + phy_ID = (phy_reg & 0xffff) << 16; + + /* Grab the bits from PHYIR2, and put them in the lower half */ + phy_reg = read_phy_reg(dev, MIIM_PHYIR2); + phy_ID |= (phy_reg & 0xffff); + + /* loop through all the known PHY types, and find one that */ + /* matches the ID we read from the PHY. */ + for (i = 0; phy_info[i]; i++) + if (phy_info[i]->id == (phy_ID >> phy_info[i]->shift)) + theInfo = phy_info[i]; + + if (theInfo == NULL) { + printk("%s: PHY id %x is not supported!\n", dev->name, phy_ID); + return NULL; + } else { + printk("%s: PHY is %s (%x)\n", dev->name, theInfo->name, + phy_ID); + } + + return theInfo; +} + +/* Take a list of struct phy_cmd, and, depending on the values, either */ +/* read or write, using a helper function if provided */ +/* It is assumed that all lists of struct phy_cmd will be terminated by */ +/* mii_end. */ +void phy_run_commands(struct net_device *dev, const struct phy_cmd *cmd) +{ + int i; + u16 result; + struct gfar_private *priv = (struct gfar_private *) dev->priv; + struct gfar *phyregs = priv->phyregs; + + /* Reset the management interface */ + gfar_write(&phyregs->miimcfg, MIIMCFG_RESET); + + /* Setup the MII Mgmt clock speed */ + gfar_write(&phyregs->miimcfg, MIIMCFG_INIT_VALUE); + + /* Wait until the bus is free */ + while (gfar_read(&phyregs->miimind) & MIIMIND_BUSY) + cpu_relax(); + + for (i = 0; cmd->mii_reg != miim_end; i++) { + /* The command is a read if mii_data is miim_read */ + if (cmd->mii_data == miim_read) { + /* Read the value of the PHY reg */ + result = read_phy_reg(dev, cmd->mii_reg); + + /* If a function was supplied, we need to let it process */ + /* the result. */ + if (cmd->funct != NULL) + (*(cmd->funct)) (result, dev); + } else { /* Otherwise, it's a write */ + /* If a function was supplied, it will provide + * the value to write */ + /* Otherwise, the value was supplied in cmd->mii_data */ + if (cmd->funct != NULL) + result = (*(cmd->funct)) (0, dev); + else + result = cmd->mii_data; + + /* Write the appropriate value to the PHY reg */ + write_phy_reg(dev, cmd->mii_reg, result); + } + cmd++; + } +} diff -Nru a/drivers/net/gianfar_phy.h b/drivers/net/gianfar_phy.h --- /dev/null Wed Dec 31 16:00:00 196900 +++ b/drivers/net/gianfar_phy.h 2004-07-13 12:46:19 -07:00 @@ -0,0 +1,192 @@ +/* + * drivers/net/gianfar_phy.h + * + * Gianfar Ethernet Driver -- PHY handling + * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 + * Based on 8260_io/fcc_enet.c + * + * Author: Andy Fleming + * Maintainer: Kumar Gala (kumar.gala@freescale.com) + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#ifndef __GIANFAR_PHY_H +#define __GIANFAR_PHY_H + +#define miim_end ((u32)-2) +#define miim_read ((u32)-1) + +#define MIIMIND_BUSY 0x00000001 +#define MIIMIND_NOTVALID 0x00000004 + +#define MIIM_CONTROL 0x00 +#define MIIM_CONTROL_RESET 0x00008000 +#define MIIM_CONTROL_INIT 0x00001140 +#define MIIM_ANEN 0x00001000 + +#define MIIM_CR 0x00 +#define MIIM_CR_RST 0x00008000 +#define MIIM_CR_INIT 0x00001000 + +#define MIIM_STATUS 0x1 +#define MIIM_STATUS_AN_DONE 0x00000020 +#define MIIM_STATUS_LINK 0x0004 + +#define MIIM_PHYIR1 0x2 +#define MIIM_PHYIR2 0x3 + +#define GFAR_AN_TIMEOUT 0x000fffff + +#define MIIM_ANLPBPA 0x5 +#define MIIM_ANLPBPA_HALF 0x00000040 +#define MIIM_ANLPBPA_FULL 0x00000020 + +#define MIIM_ANEX 0x6 +#define MIIM_ANEX_NP 0x00000004 +#define MIIM_ANEX_PRX 0x00000002 + + +/* Cicada Extended Control Register 1 */ +#define MIIM_CIS8201_EXT_CON1 0x17 +#define MIIM_CIS8201_EXTCON1_INIT 0x0000 + +/* Cicada Interrupt Mask Register */ +#define MIIM_CIS8204_IMASK 0x19 +#define MIIM_CIS8204_IMASK_IEN 0x8000 +#define MIIM_CIS8204_IMASK_SPEED 0x4000 +#define MIIM_CIS8204_IMASK_LINK 0x2000 +#define MIIM_CIS8204_IMASK_DUPLEX 0x1000 +#define MIIM_CIS8204_IMASK_MASK 0xf000 + +/* Cicada Interrupt Status Register */ +#define MIIM_CIS8204_ISTAT 0x1a +#define MIIM_CIS8204_ISTAT_STATUS 0x8000 +#define MIIM_CIS8204_ISTAT_SPEED 0x4000 +#define MIIM_CIS8204_ISTAT_LINK 0x2000 +#define MIIM_CIS8204_ISTAT_DUPLEX 0x1000 + +/* Cicada Auxiliary Control/Status Register */ +#define MIIM_CIS8201_AUX_CONSTAT 0x1c +#define MIIM_CIS8201_AUXCONSTAT_INIT 0x0004 +#define MIIM_CIS8201_AUXCONSTAT_DUPLEX 0x0020 +#define MIIM_CIS8201_AUXCONSTAT_SPEED 0x0018 +#define MIIM_CIS8201_AUXCONSTAT_GBIT 0x0010 +#define MIIM_CIS8201_AUXCONSTAT_100 0x0008 + +/* 88E1011 PHY Status Register */ +#define MIIM_88E1011_PHY_STATUS 0x11 +#define MIIM_88E1011_PHYSTAT_SPEED 0xc000 +#define MIIM_88E1011_PHYSTAT_GBIT 0x8000 +#define MIIM_88E1011_PHYSTAT_100 0x4000 +#define MIIM_88E1011_PHYSTAT_DUPLEX 0x2000 +#define MIIM_88E1011_PHYSTAT_LINK 0x0400 + +#define MIIM_88E1011_IEVENT 0x13 +#define MIIM_88E1011_IEVENT_CLEAR 0x0000 + +#define MIIM_88E1011_IMASK 0x12 +#define MIIM_88E1011_IMASK_INIT 0x6400 +#define MIIM_88E1011_IMASK_CLEAR 0x0000 + +/* DM9161 Control register values */ +#define MIIM_DM9161_CR_STOP 0x0400 +#define MIIM_DM9161_CR_RSTAN 0x1200 + +#define MIIM_DM9161_SCR 0x10 +#define MIIM_DM9161_SCR_INIT 0x0610 + +/* DM9161 Specified Configuration and Status Register */ +#define MIIM_DM9161_SCSR 0x11 +#define MIIM_DM9161_SCSR_100F 0x8000 +#define MIIM_DM9161_SCSR_100H 0x4000 +#define MIIM_DM9161_SCSR_10F 0x2000 +#define MIIM_DM9161_SCSR_10H 0x1000 + +/* DM9161 Interrupt Register */ +#define MIIM_DM9161_INTR 0x15 +#define MIIM_DM9161_INTR_PEND 0x8000 +#define MIIM_DM9161_INTR_DPLX_MASK 0x0800 +#define MIIM_DM9161_INTR_SPD_MASK 0x0400 +#define MIIM_DM9161_INTR_LINK_MASK 0x0200 +#define MIIM_DM9161_INTR_MASK 0x0100 +#define MIIM_DM9161_INTR_DPLX_CHANGE 0x0010 +#define MIIM_DM9161_INTR_SPD_CHANGE 0x0008 +#define MIIM_DM9161_INTR_LINK_CHANGE 0x0004 +#define MIIM_DM9161_INTR_INIT 0x0000 +#define MIIM_DM9161_INTR_STOP \ +(MIIM_DM9161_INTR_DPLX_MASK | MIIM_DM9161_INTR_SPD_MASK \ + | MIIM_DM9161_INTR_LINK_MASK | MIIM_DM9161_INTR_MASK) + +/* DM9161 10BT Configuration/Status */ +#define MIIM_DM9161_10BTCSR 0x12 +#define MIIM_DM9161_10BTCSR_INIT 0x7800 + + +#define MIIM_READ_COMMAND 0x00000001 + +/* + * struct phy_cmd: A command for reading or writing a PHY register + * + * mii_reg: The register to read or write + * + * mii_data: For 
writes, the value to put in the register. + * A value of -1 indicates this is a read. + * + * funct: A function pointer which is invoked for each command. + * For reads, this function will be passed the value read + * from the PHY, and process it. + * For writes, the result of this function will be written + * to the PHY register + */ +struct phy_cmd { + u32 mii_reg; + u32 mii_data; + u16 (*funct) (u16 mii_reg, struct net_device * dev); +}; + +/* struct phy_info: a structure which defines attributes for a PHY + * + * id will contain a number which represents the PHY. During + * startup, the driver will poll the PHY to find out what its + * UID--as defined by registers 2 and 3--is. The 32-bit result + * gotten from the PHY will be shifted right by "shift" bits to + * discard any bits which may change based on revision numbers + * unimportant to functionality + * + * The struct phy_cmd entries represent pointers to an arrays of + * commands which tell the driver what to do to the PHY. + */ +struct phy_info { + u32 id; + char *name; + unsigned int shift; + /* Called to configure the PHY, and modify the controller + * based on the results */ + const struct phy_cmd *config; + + /* Called when starting up the controller. Usually sets + * up the interrupt for state changes */ + const struct phy_cmd *startup; + + /* Called inside the interrupt handler to acknowledge + * the interrupt */ + const struct phy_cmd *ack_int; + + /* Called in the bottom half to handle the interrupt */ + const struct phy_cmd *handle_int; + + /* Called when bringing down the controller. Usually stops + * the interrupts from being generated */ + const struct phy_cmd *shutdown; +}; + +struct phy_info *get_phy_info(struct net_device *dev); +void phy_run_commands(struct net_device *dev, const struct phy_cmd *cmd); + +#endif /* GIANFAR_PHY_H */ diff -Nru a/drivers/net/mv64340_eth.c b/drivers/net/mv64340_eth.c --- /dev/null Wed Dec 31 16:00:00 196900 +++ b/drivers/net/mv64340_eth.c 2004-07-13 12:46:19 -07:00 @@ -0,0 +1,2724 @@ +/* + * drivers/net/mv64340_eth.c - Driver for MV64340X ethernet ports + * Copyright (C) 2002 Matthew Dharm + * + * Based on the 64360 driver from: + * Copyright (C) 2002 rabeeh@galileo.co.il + * + * Copyright (C) 2003 PMC-Sierra, Inc., + * written by Manish Lachwani (lachwani@pmc-sierra.com) + * + * Copyright (C) 2003 Ralf Baechle + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include "mv64340_eth.h" + +/* + * The first part is the high level driver of the gigE ethernet ports. 
+ */
+
+/* Definition for configuring driver */
+#undef MV64340_RX_QUEUE_FILL_ON_TASK
+
+/* Constants */
+#define EXTRA_BYTES 32
+#define WRAP ETH_HLEN + 2 + 4 + 16
+#define BUFFER_MTU dev->mtu + WRAP
+#define INT_CAUSE_UNMASK_ALL 0x0007ffff
+#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
+#ifdef MV64340_RX_QUEUE_FILL_ON_TASK
+#define INT_CAUSE_MASK_ALL 0x00000000
+#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
+#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
+#endif
+
+/* Static function declarations */
+static int mv64340_eth_real_open(struct net_device *);
+static int mv64340_eth_real_stop(struct net_device *);
+static int mv64340_eth_change_mtu(struct net_device *, int);
+static struct net_device_stats *mv64340_eth_get_stats(struct net_device *);
+static void eth_port_init_mac_tables(unsigned int eth_port_num);
+#ifdef MV64340_NAPI
+static int mv64340_poll(struct net_device *dev, int *budget);
+#endif
+
+unsigned char prom_mac_addr_base[6];
+unsigned long mv64340_sram_base;
+
+/*
+ * Changes the MTU (maximum transfer unit) of the gigabit ethernet port
+ *
+ * Input : pointer to ethernet interface network device structure
+ *         new mtu size
+ * Output : 0 upon success, -EINVAL upon failure
+ */
+static int mv64340_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct mv64340_private *mp = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mp->lock, flags);
+
+	if ((new_mtu > 9500) || (new_mtu < 64)) {
+		spin_unlock_irqrestore(&mp->lock, flags);
+		return -EINVAL;
+	}
+
+	dev->mtu = new_mtu;
+	/*
+	 * Stop and then re-open the interface.  This will allocate RX
+	 * skb's with the new MTU.  There is a possible danger that the
+	 * open will not succeed if memory is exhausted, which would
+	 * leave the interface down.
+	 */
+	if (netif_running(dev)) {
+		if (mv64340_eth_real_stop(dev))
+			printk(KERN_ERR
+			       "%s: Fatal error on stopping device\n",
+			       dev->name);
+		if (mv64340_eth_real_open(dev))
+			printk(KERN_ERR
+			       "%s: Fatal error on opening device\n",
+			       dev->name);
+	}
+
+	spin_unlock_irqrestore(&mp->lock, flags);
+	return 0;
+}
+
+/*
+ * mv64340_eth_rx_task
+ *
+ * Fills / refills the RX queue of a certain gigabit ethernet port
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void mv64340_eth_rx_task(void *data)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct mv64340_private *mp = netdev_priv(dev);
+	struct pkt_info pkt_info;
+	struct sk_buff *skb;
+
+	if (test_and_set_bit(0, &mp->rx_task_busy))
+		panic("%s: Error in test_set_bit / clear_bit", dev->name);
+
+	while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
+		/* The +8 is for buffer alignment, plus another 32 bytes extra */
+		skb = dev_alloc_skb(BUFFER_MTU + 8 + EXTRA_BYTES);
+		if (!skb)
+			/* Better luck next time */
+			break;
+		mp->rx_ring_skbs++;
+		pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
+		pkt_info.byte_cnt = dev->mtu + ETH_HLEN + 4 + 2 + EXTRA_BYTES;
+		/* Round the buffer size up to the next 8-byte multiple */
+		if (pkt_info.byte_cnt & 0x7) {
+			pkt_info.byte_cnt &= ~0x7;
+			pkt_info.byte_cnt += 8;
+		}
+		pkt_info.buf_ptr =
+		    pci_map_single(0, skb->data,
+				   dev->mtu + ETH_HLEN + 4 + 2 + EXTRA_BYTES,
+				   PCI_DMA_FROMDEVICE);
+		pkt_info.return_info = skb;
+		if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
+			printk(KERN_ERR
+			       "%s: Error allocating RX Ring\n", dev->name);
+			break;
+		}
+		skb_reserve(skb, 2);
+	}
+	clear_bit(0, &mp->rx_task_busy);
+	/*
+	 * If the RX ring is empty of skbs, set a timer to retry the
+	 * allocation later.
+	 */
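The byte-count fix-up in mv64340_eth_rx_task() above is a round-up to the next 8-byte multiple; the closed form below is equivalent for these sizes (sketch, not from the patch):

#include <stdio.h>

static unsigned int round_up8(unsigned int n)
{
	return (n + 7) & ~7u;
}

int main(void)
{
	printf("%u -> %u\n", 1518u, round_up8(1518));	/* 1520 */
	printf("%u -> %u\n", 1520u, round_up8(1520));	/* already aligned */
	return 0;
}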
+	if ((mp->rx_ring_skbs == 0) && (mp->rx_timer_flag == 0)) {
+		printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
+		/* Retry after 100 msec */
+		mp->timeout.expires = jiffies + (HZ / 10);
+		add_timer(&mp->timeout);
+		mp->rx_timer_flag = 1;
+	}
+#ifdef MV64340_RX_QUEUE_FILL_ON_TASK
+	else {
+		/* Return interrupts */
+		MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(mp->port_num),
+			 INT_CAUSE_UNMASK_ALL);
+	}
+#endif
+}
+
+/*
+ * mv64340_eth_rx_task_timer_wrapper
+ *
+ * Timer routine to wake up the RX queue filling task.  This function is
+ * used only when the RX queue is empty and every alloc_skb has failed
+ * (due to an out of memory event).
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void mv64340_eth_rx_task_timer_wrapper(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct mv64340_private *mp = netdev_priv(dev);
+
+	mp->rx_timer_flag = 0;
+	mv64340_eth_rx_task((void *) data);
+}
+
+/*
+ * mv64340_eth_update_mac_address
+ *
+ * Update the MAC address of the port in the address table
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void mv64340_eth_update_mac_address(struct net_device *dev)
+{
+	struct mv64340_private *mp = netdev_priv(dev);
+	unsigned int port_num = mp->port_num;
+
+	eth_port_init_mac_tables(port_num);
+	memcpy(mp->port_mac_addr, dev->dev_addr, 6);
+	eth_port_uc_addr_set(port_num, mp->port_mac_addr);
+}
+
+/*
+ * mv64340_eth_set_rx_mode
+ *
+ * Switch between promiscuous and regular rx mode
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void mv64340_eth_set_rx_mode(struct net_device *dev)
+{
+	struct mv64340_private *mp = netdev_priv(dev);
+
+	if (dev->flags & IFF_PROMISC) {
+		ethernet_set_config_reg
+		    (mp->port_num,
+		     ethernet_get_config_reg(mp->port_num) |
+		     ETH_UNICAST_PROMISCUOUS_MODE);
+	} else {
+		ethernet_set_config_reg
+		    (mp->port_num,
+		     ethernet_get_config_reg(mp->port_num) &
+		     ~(unsigned int) ETH_UNICAST_PROMISCUOUS_MODE);
+	}
+}
+
+/*
+ * mv64340_eth_set_mac_address
+ *
+ * Change the interface's mac address.  Nothing special needs to be
+ * done in hardware because the interface is always put in promiscuous
+ * mode.
+ *
+ * Input : pointer to ethernet interface network device structure and
+ *         a pointer to the designated entry to be added to the cache.
+ * Output : zero upon success, negative upon failure
+ */
+static int mv64340_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+	int i;
+
+	for (i = 0; i < 6; i++)
+		/* +2 is for the offset of the HW addr type */
+		dev->dev_addr[i] = ((unsigned char *) addr)[i + 2];
+	mv64340_eth_update_mac_address(dev);
+	return 0;
+}
+
+/*
+ * mv64340_eth_tx_timeout
+ *
+ * Called upon a timeout on transmitting a packet
+ *
+ * Input : pointer to ethernet interface network device structure.
+ * Output : N/A
+ */
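The [i + 2] offset in mv64340_eth_set_mac_address() above skips the sa_family field at the front of the struct sockaddr the address arrives in. A sketch of that layout (the struct below is a stand-in, not the kernel's struct sockaddr):

#include <stdio.h>
#include <string.h>

struct sockaddr_like {
	unsigned short sa_family;	/* the 2 bytes being skipped */
	char sa_data[14];		/* hardware address starts here */
};

int main(void)
{
	struct sockaddr_like sa = { 1, { 0x00, 0x0a, 0x1b, 0x2c, 0x3d, 0x4e } };
	unsigned char mac[6];

	memcpy(mac, sa.sa_data, 6);	/* same bytes as ((char *)&sa)[2..7] */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}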
+static void mv64340_eth_tx_timeout(struct net_device *dev)
+{
+	struct mv64340_private *mp = netdev_priv(dev);
+
+	printk(KERN_INFO "%s: TX timeout\n", dev->name);
+
+	/* Do the reset outside of interrupt context */
+	schedule_work(&mp->tx_timeout_task);
+}
+
+/*
+ * mv64340_eth_tx_timeout_task
+ *
+ * Actual routine to reset the adapter when a timeout on Tx has occurred
+ */
+static void mv64340_eth_tx_timeout_task(struct net_device *dev)
+{
+	struct mv64340_private *mp = netdev_priv(dev);
+
+	netif_device_detach(dev);
+	eth_port_reset(mp->port_num);
+	eth_port_start(mp);
+	netif_device_attach(dev);
+}
+
+/*
+ * mv64340_eth_free_tx_queue
+ *
+ * Input : dev - a pointer to the required interface
+ *
+ * Output : 0 if it was able to release an skb, nonzero otherwise
+ */
+static int mv64340_eth_free_tx_queue(struct net_device *dev,
+				     unsigned int eth_int_cause_ext)
+{
+	struct mv64340_private *mp = netdev_priv(dev);
+	struct net_device_stats *stats = &mp->stats;
+	struct pkt_info pkt_info;
+	int released = 1;
+
+	if (!(eth_int_cause_ext & (BIT0 | BIT8)))
+		return released;
+
+	spin_lock(&mp->lock);
+
+	/* Check only queue 0 */
+	while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
+		if (pkt_info.cmd_sts & BIT0) {
+			printk(KERN_ERR "%s: Error in TX\n", dev->name);
+			stats->tx_errors++;
+		}
+
+		/*
+		 * If return_info is different from 0, release the skb.
+		 * return_info is only non-zero on the last descriptor of
+		 * a scatter/gather packet; only that last skb releases
+		 * the whole chain.
+		 */
+		if (pkt_info.return_info) {
+			dev_kfree_skb_irq((struct sk_buff *)
+					  pkt_info.return_info);
+			released = 0;
+			if (skb_shinfo(pkt_info.return_info)->nr_frags)
+				pci_unmap_page(NULL, pkt_info.buf_ptr,
+					       pkt_info.byte_cnt,
+					       PCI_DMA_TODEVICE);
+
+			if (mp->tx_ring_skbs != 1)
+				mp->tx_ring_skbs--;
+		} else
+			pci_unmap_page(NULL, pkt_info.buf_ptr,
+				       pkt_info.byte_cnt, PCI_DMA_TODEVICE);
+
+		/*
+		 * Sanity check the counter of outstanding skbs on the
+		 * TX queue; it was decremented above.
+		 */
+		if (mp->tx_ring_skbs == 0)
+			panic("ERROR - TX outstanding SKBs counter is corrupted");
+	}
+
+	spin_unlock(&mp->lock);
+
+	return released;
+}
+
+/*
+ * mv64340_eth_receive
+ *
+ * This function forwards packets received from the port's queues
+ * toward the kernel core, or FastRoutes them to another interface.
+ *
+ * Input : dev - a pointer to the required interface
+ *         max - maximum number to receive (0 means unlimited)
+ *
+ * Output : number of served packets
+ */
+#ifdef MV64340_NAPI
+static int mv64340_eth_receive_queue(struct net_device *dev, unsigned int max,
+				     int budget)
+#else
+static int mv64340_eth_receive_queue(struct net_device *dev, unsigned int max)
+#endif
+{
+	struct mv64340_private *mp = netdev_priv(dev);
+	struct net_device_stats *stats = &mp->stats;
+	unsigned int received_packets = 0;
+	struct sk_buff *skb;
+	struct pkt_info pkt_info;
+
+#ifdef MV64340_NAPI
+	while (eth_port_receive(mp, &pkt_info) == ETH_OK && budget > 0) {
+#else
+	while ((--max) && eth_port_receive(mp, &pkt_info) == ETH_OK) {
+#endif
+		mp->rx_ring_skbs--;
+		received_packets++;
+#ifdef MV64340_NAPI
+		budget--;
+#endif
+		/* Update statistics.  The byte count includes the 4 byte CRC */
+		stats->rx_packets++;
+		stats->rx_bytes += pkt_info.byte_cnt;
+		skb = (struct sk_buff *) pkt_info.return_info;
+		/*
+		 * If we received a packet without the first / last bits on,
+		 * or with the error summary bit set, the packet needs to be
+		 * dropped.
+		 */
+		if (((pkt_info.cmd_sts
+		      & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
+		     (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
+		    || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
+			stats->rx_dropped++;
+			if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC |
+						 ETH_RX_LAST_DESC)) !=
+			    (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
+				if (net_ratelimit())
+					printk(KERN_ERR
+					       "%s: Received packet spread on multiple"
+					       " descriptors\n",
+					       dev->name);
+			}
+			if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)
+				stats->rx_errors++;
+
+			dev_kfree_skb_irq(skb);
+		} else {
+			/*
+			 * The -4 is for the CRC in the trailer of the
+			 * received packet
+			 */
+			skb_put(skb, pkt_info.byte_cnt - 4);
+			skb->dev = dev;
+
+			if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				skb->csum = htons((pkt_info.cmd_sts
+						   & 0x0007fff8) >> 3);
+			}
+			skb->protocol = eth_type_trans(skb, dev);
+#ifdef MV64340_NAPI
+			netif_receive_skb(skb);
+#else
+			netif_rx(skb);
+#endif
+		}
+	}
+
+	return received_packets;
+}
+
+/*
+ * mv64340_eth_int_handler
+ *
+ * Main interrupt handler for the gigabit ethernet ports
+ *
+ * Input : irq    - irq number (not used)
+ *         dev_id - a pointer to the required interface's data structure
+ *         regs   - not used
+ * Output : N/A
+ */
+static irqreturn_t mv64340_eth_int_handler(int irq, void *dev_id,
+					   struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) dev_id;
+	struct mv64340_private *mp = netdev_priv(dev);
+	u32 eth_int_cause, eth_int_cause_ext = 0;
+	unsigned int port_num = mp->port_num;
+
+	/* Read interrupt cause registers */
+	eth_int_cause = MV_READ(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num)) &
+	    INT_CAUSE_UNMASK_ALL;
+
+	if (eth_int_cause & BIT1)
+		eth_int_cause_ext =
+		    MV_READ(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
+		    INT_CAUSE_UNMASK_ALL_EXT;
+
+#ifdef MV64340_NAPI
+	if (!(eth_int_cause & 0x0007fffd)) {
+		/* Don't ack the Rx interrupt */
+#endif
+		/*
+		 * Clear the specific ethernet port interrupt registers by
+		 * acknowledging the relevant bits.
+		 */
+		MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num),
+			 ~eth_int_cause);
+		if (eth_int_cause_ext != 0x0)
+			MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),
+				 ~eth_int_cause_ext);
+
+		/* UDP change : We may need this */
+		if ((eth_int_cause_ext & 0x0000ffff) &&
+		    (mv64340_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) &&
+		    (MV64340_TX_QUEUE_SIZE > mp->tx_ring_skbs + 1))
+			netif_wake_queue(dev);
+#ifdef MV64340_NAPI
+	} else {
+		if (netif_rx_schedule_prep(dev)) {
+			/* Mask all the interrupts */
+			MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num), 0);
+			MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
+			__netif_rx_schedule(dev);
+		}
+#else
+	{
+		if (eth_int_cause & (BIT2 | BIT11))
+			mv64340_eth_receive_queue(dev, 0);
+
+		/*
+		 * After forwarding received packets to the upper layer,
+		 * add a task in an interrupts-enabled context that refills
+		 * the RX ring with skb's.
+		 */
+#ifdef MV64340_RX_QUEUE_FILL_ON_TASK
+		/* Unmask all interrupts on ethernet port */
+		MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
+			 INT_CAUSE_MASK_ALL);
+		queue_task(&mp->rx_task, &tq_immediate);
+		mark_bh(IMMEDIATE_BH);
+#else
+		mp->rx_task.func(dev);
+#endif
+#endif
+	}
+	/* PHY status changed */
+	if (eth_int_cause_ext & (BIT16 | BIT20)) {
+		unsigned int phy_reg_data;
+
+		/* Check Link status on ethernet port */
+		eth_port_read_smi_reg(port_num, 1, &phy_reg_data);
+		if (!(phy_reg_data & 0x20)) {
+			netif_stop_queue(dev);
+		} else {
+			netif_wake_queue(dev);
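The handler above acknowledges events by writing the complement of the observed cause bits back to the cause register. Assuming write-zero-to-clear semantics for the MV-643xx cause registers (an assumption of this sketch, not stated by the patch), the net effect on the register is:

#include <stdio.h>

#define INT_CAUSE_UNMASK_ALL 0x0007ffffu

int main(void)
{
	unsigned int cause = 0x00000106u;	/* pretend hardware state */
	unsigned int seen = cause & INT_CAUSE_UNMASK_ALL;

	/* MV_WRITE(reg, ~seen): zero bits clear, one bits leave alone */
	cause &= ~seen;
	printf("cause after ack: 0x%08x\n", cause);
	return 0;
}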
+			/*
+			 * Start all TX queues on the ethernet port.  This is
+			 * good in case previous packets were not transmitted
+			 * due to link down: this command re-enables all TX
+			 * queues.  Note that it is possible to get a TX
+			 * resource error interrupt after issuing this, since
+			 * not all TX queues are enabled, or some have nothing
+			 * to send.
+			 */
+			MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 1);
+		}
+	}
+
+	/*
+	 * If no real interrupt occurred, exit.  This can happen when
+	 * using the gigE interrupt coalescing mechanism.
+	 */
+	if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0))
+		return IRQ_NONE;
+
+	return IRQ_HANDLED;
+}
+
+#ifdef MV64340_COAL
+
+/*
+ * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path
+ *
+ * DESCRIPTION:
+ *	This routine sets the RX coalescing interrupt mechanism parameter.
+ *	This parameter is a timeout counter that counts in 64 t_clk
+ *	chunks; when the timeout expires, a maskable interrupt occurs.
+ *	The parameter is calculated using the t_clk of the MV-643xx chip
+ *	and the required delay of the interrupt in usec.
+ *
+ * INPUT:
+ *	unsigned int eth_port_num	Ethernet port number
+ *	unsigned int t_clk		t_clk of the MV-643xx chip in Hz
+ *	unsigned int delay		Delay in usec
+ *
+ * OUTPUT:
+ *	The interrupt coalescing mechanism value is set in the MV-643xx chip.
+ *
+ * RETURN:
+ *	The interrupt coalescing value set in the gigE port.
+ */
+static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
+					 unsigned int t_clk, unsigned int delay)
+{
+	unsigned int coal = ((t_clk / 1000000) * delay) / 64;
+
+	/* Set RX Coalescing mechanism */
+	MV_WRITE(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num),
+		 ((coal & 0x3fff) << 8) |
+		 (MV_READ(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num))
+		  & 0xffc000ff));
+
+	return coal;
+}
+#endif
+
+/*
+ * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path
+ *
+ * DESCRIPTION:
+ *	This routine sets the TX coalescing interrupt mechanism parameter.
+ *	This parameter is a timeout counter that counts in 64 t_clk
+ *	chunks; when the timeout expires, a maskable interrupt occurs.
+ *	The parameter is calculated using the t_clk frequency of the
+ *	MV-643xx chip and the required delay of the interrupt in usec.
+ *
+ * INPUT:
+ *	unsigned int eth_port_num	Ethernet port number
+ *	unsigned int t_clk		t_clk of the MV-643xx chip in Hz
+ *	unsigned int delay		Delay in usec
+ *
+ * OUTPUT:
+ *	The interrupt coalescing mechanism value is set in the MV-643xx chip.
+ *
+ * RETURN:
+ *	The interrupt coalescing value set in the gigE port.
+ */
+static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
+					 unsigned int t_clk, unsigned int delay)
+{
+	unsigned int coal;
+
+	coal = ((t_clk / 1000000) * delay) / 64;
+	/* Set TX Coalescing mechanism */
+	MV_WRITE(MV64340_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num),
+		 coal << 4);
+	return coal;
+}
+
+/*
+ * mv64340_eth_open
+ *
+ * This function is called when the network device is opened.  It
+ * initializes all the hardware, initializes the cyclic Rx/Tx
+ * descriptor chains and buffers, and allocates an IRQ for the
+ * network device.
+ *
+ * Input : a pointer to the network device structure
+ *
+ * Output : zero on success, nonzero on failure.
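Both coalescing setters above reduce to the same arithmetic: a microsecond delay converted into 64-t_clk chunks. With the 133 MHz t_clk that mv64340_eth_real_open() passes in and an example delay of 100 usec (the real MV64340_RX_COAL/MV64340_TX_COAL values are not shown in this patch):

#include <stdio.h>

static unsigned int coal_value(unsigned int t_clk, unsigned int delay_usec)
{
	return ((t_clk / 1000000) * delay_usec) / 64;
}

int main(void)
{
	/* (133 * 100) / 64 = 207 chunks of 64 t_clk each */
	printf("coal = %u\n", coal_value(133000000, 100));
	return 0;
}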
+ */ + +static int mv64340_eth_open(struct net_device *dev) +{ + struct mv64340_private *mp = netdev_priv(dev); + unsigned int port_num = mp->port_num; + int err = err; + + spin_lock_irq(&mp->lock); + + err = request_irq(dev->irq, mv64340_eth_int_handler, + SA_INTERRUPT | SA_SAMPLE_RANDOM, dev->name, dev); + + if (err) { + printk(KERN_ERR "Can not assign IRQ number to MV64340_eth%d\n", + port_num); + err = -EAGAIN; + goto out; + } + + if (mv64340_eth_real_open(dev)) { + printk("%s: Error opening interface\n", dev->name); + err = -EBUSY; + goto out_free; + } + + spin_unlock_irq(&mp->lock); + + return 0; + +out_free: + free_irq(dev->irq, dev); + +out: + spin_unlock_irq(&mp->lock); + + return err; +} + +/* + * ether_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory. + * + * DESCRIPTION: + * This function prepares a Rx chained list of descriptors and packet + * buffers in a form of a ring. The routine must be called after port + * initialization routine and before port start routine. + * The Ethernet SDMA engine uses CPU bus addresses to access the various + * devices in the system (i.e. DRAM). This function uses the ethernet + * struct 'virtual to physical' routine (set by the user) to set the ring + * with physical addresses. + * + * INPUT: + * struct mv64340_private *mp Ethernet Port Control srtuct. + * int rx_desc_num Number of Rx descriptors + * int rx_buff_size Size of Rx buffer + * unsigned int rx_desc_base_addr Rx descriptors memory area base addr. + * unsigned int rx_buff_base_addr Rx buffer memory area base addr. + * + * OUTPUT: + * The routine updates the Ethernet port control struct with information + * regarding the Rx descriptors and buffers. + * + * RETURN: + * false if the given descriptors memory area is not aligned according to + * Ethernet SDMA specifications. + * true otherwise. + */ +static int ether_init_rx_desc_ring(struct mv64340_private * mp, + unsigned long rx_buff_base_addr) +{ + unsigned long buffer_addr = rx_buff_base_addr; + volatile struct eth_rx_desc *p_rx_desc; + int rx_desc_num = mp->rx_ring_size; + unsigned long rx_desc_base_addr = (unsigned long) mp->p_rx_desc_area; + int rx_buff_size = 1536; /* Dummy, will be replaced later */ + int i; + + p_rx_desc = (struct eth_rx_desc *) rx_desc_base_addr; + + /* Rx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */ + if (rx_buff_base_addr & 0xf) + return 0; + + /* Rx buffers are limited to 64K bytes and Minimum size is 8 bytes */ + if ((rx_buff_size < 8) || (rx_buff_size > RX_BUFFER_MAX_SIZE)) + return 0; + + /* Rx buffers must be 64-bit aligned. */ + if ((rx_buff_base_addr + rx_buff_size) & 0x7) + return 0; + + /* initialize the Rx descriptors ring */ + for (i = 0; i < rx_desc_num; i++) { + p_rx_desc[i].buf_size = rx_buff_size; + p_rx_desc[i].byte_cnt = 0x0000; + p_rx_desc[i].cmd_sts = + ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT; + p_rx_desc[i].next_desc_ptr = + (struct eth_rx_desc *) mp->rx_desc_dma + + (i + 1) % rx_desc_num; + p_rx_desc[i].buf_ptr = buffer_addr; + + mp->rx_skb[i] = NULL; + buffer_addr += rx_buff_size; + } + + /* Save Rx desc pointer to driver struct. */ + mp->rx_curr_desc_q = 0; + mp->rx_used_desc_q = 0; + + mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc); + + mp->port_rx_queue_command |= 1; + + return 1; +} + +/* + * ether_init_tx_desc_ring - Curve a Tx chain desc list and buffer in memory. + * + * DESCRIPTION: + * This function prepares a Tx chained list of descriptors and packet + * buffers in a form of a ring. 
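Both ring-init routines link descriptor i to descriptor (i + 1) % ring_size, so the last entry points back at the first and the chain forms the ring the comments describe. The wrap in miniature (sketch, not from the patch):

#include <stdio.h>

int main(void)
{
	int ring_size = 4, i;

	for (i = 0; i < ring_size; i++)
		printf("desc %d -> desc %d\n", i, (i + 1) % ring_size);
	return 0;
}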
The routine must be called after port + * initialization routine and before port start routine. + * The Ethernet SDMA engine uses CPU bus addresses to access the various + * devices in the system (i.e. DRAM). This function uses the ethernet + * struct 'virtual to physical' routine (set by the user) to set the ring + * with physical addresses. + * + * INPUT: + * struct mv64340_private *mp Ethernet Port Control srtuct. + * int tx_desc_num Number of Tx descriptors + * int tx_buff_size Size of Tx buffer + * unsigned int tx_desc_base_addr Tx descriptors memory area base addr. + * + * OUTPUT: + * The routine updates the Ethernet port control struct with information + * regarding the Tx descriptors and buffers. + * + * RETURN: + * false if the given descriptors memory area is not aligned according to + * Ethernet SDMA specifications. + * true otherwise. + */ +static int ether_init_tx_desc_ring(struct mv64340_private *mp) +{ + unsigned long tx_desc_base_addr = (unsigned long) mp->p_tx_desc_area; + int tx_desc_num = mp->tx_ring_size; + struct eth_tx_desc *p_tx_desc; + int i; + + /* Tx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */ + if (tx_desc_base_addr & 0xf) + return 0; + + /* save the first desc pointer to link with the last descriptor */ + p_tx_desc = (struct eth_tx_desc *) tx_desc_base_addr; + + /* Initialize the Tx descriptors ring */ + for (i = 0; i < tx_desc_num; i++) { + p_tx_desc[i].byte_cnt = 0x0000; + p_tx_desc[i].l4i_chk = 0x0000; + p_tx_desc[i].cmd_sts = 0x00000000; + p_tx_desc[i].next_desc_ptr = + (struct eth_tx_desc *) mp->tx_desc_dma + + (i + 1) % tx_desc_num; + p_tx_desc[i].buf_ptr = 0x00000000; + mp->tx_skb[i] = NULL; + } + + /* Set Tx desc pointer in driver struct. */ + mp->tx_curr_desc_q = 0; + mp->tx_used_desc_q = 0; +#ifdef MV64340_CHECKSUM_OFFLOAD_TX + mp->tx_first_desc_q = 0; +#endif + /* Init Tx ring base and size parameters */ + mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc); + + /* Add the queue to the list of Tx queues of this port */ + mp->port_tx_queue_command |= 1; + + return 1; +} + +/* Helper function for mv64340_eth_open */ +static int mv64340_eth_real_open(struct net_device *dev) +{ + struct mv64340_private *mp = netdev_priv(dev); + unsigned int port_num = mp->port_num; + u32 phy_reg_data; + unsigned int size; + + /* Stop RX Queues */ + MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), + 0x0000ff00); + + /* Clear the ethernet port interrupts */ + MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num), 0); + MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); + + /* Unmask RX buffer and TX end interrupt */ + MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num), + INT_CAUSE_UNMASK_ALL); + + /* Unmask phy and link status changes interrupts */ + MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), + INT_CAUSE_UNMASK_ALL_EXT); + + /* Set the MAC Address */ + memcpy(mp->port_mac_addr, dev->dev_addr, 6); + + eth_port_init(mp); + + INIT_WORK(&mp->rx_task, (void (*)(void *)) mv64340_eth_rx_task, dev); + + memset(&mp->timeout, 0, sizeof(struct timer_list)); + mp->timeout.function = mv64340_eth_rx_task_timer_wrapper; + mp->timeout.data = (unsigned long) dev; + + mp->rx_task_busy = 0; + mp->rx_timer_flag = 0; + + /* Allocate TX ring */ + mp->tx_ring_skbs = 0; + mp->tx_ring_size = MV64340_TX_QUEUE_SIZE; + size = mp->tx_ring_size * sizeof(struct eth_tx_desc); + mp->tx_desc_area_size = size; + + /* Assumes allocated ring is 16 bytes alligned */ + mp->p_tx_desc_area = pci_alloc_consistent(NULL, size, &mp->tx_desc_dma); + if 
(!mp->p_tx_desc_area) { + printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n", + dev->name, size); + return -ENOMEM; + } + memset((void *) mp->p_tx_desc_area, 0, mp->tx_desc_area_size); + + /* Dummy will be replaced upon real tx */ + ether_init_tx_desc_ring(mp); + + /* Allocate RX ring */ + /* Meantime RX Ring are fixed - but must be configurable by user */ + mp->rx_ring_size = MV64340_RX_QUEUE_SIZE; + mp->rx_ring_skbs = 0; + size = mp->rx_ring_size * sizeof(struct eth_rx_desc); + mp->rx_desc_area_size = size; + + /* Assumes allocated ring is 16 bytes aligned */ + + mp->p_rx_desc_area = pci_alloc_consistent(NULL, size, &mp->rx_desc_dma); + + if (!mp->p_rx_desc_area) { + printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n", + dev->name, size); + printk(KERN_ERR "%s: Freeing previously allocated TX queues...", + dev->name); + pci_free_consistent(0, mp->tx_desc_area_size, + (void *) mp->p_tx_desc_area, + mp->tx_desc_dma); + return -ENOMEM; + } + memset(mp->p_rx_desc_area, 0, size); + + if (!(ether_init_rx_desc_ring(mp, 0))) + panic("%s: Error initializing RX Ring", dev->name); + + mv64340_eth_rx_task(dev); /* Fill RX ring with skb's */ + + eth_port_start(mp); + + /* Interrupt Coalescing */ + +#ifdef MV64340_COAL + mp->rx_int_coal = + eth_port_set_rx_coal(port_num, 133000000, MV64340_RX_COAL); +#endif + + mp->tx_int_coal = + eth_port_set_tx_coal (port_num, 133000000, MV64340_TX_COAL); + + /* Increase the Rx side buffer size */ + + MV_WRITE (MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num), (0x5 << 17) | + (MV_READ(MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num)) + & 0xfff1ffff)); + + /* Check Link status on phy */ + eth_port_read_smi_reg(port_num, 1, &phy_reg_data); + if (!(phy_reg_data & 0x20)) + netif_stop_queue(dev); + else + netif_start_queue(dev); + + return 0; +} + +static void mv64340_eth_free_tx_rings(struct net_device *dev) +{ + struct mv64340_private *mp = netdev_priv(dev); + unsigned int port_num = mp->port_num; + unsigned int curr; + + /* Stop Tx Queues */ + MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), + 0x0000ff00); + + /* Free TX rings */ + /* Free outstanding skb's on TX rings */ + for (curr = 0; + (mp->tx_ring_skbs) && (curr < MV64340_TX_QUEUE_SIZE); + curr++) { + if (mp->tx_skb[curr]) { + dev_kfree_skb(mp->tx_skb[curr]); + mp->tx_ring_skbs--; + } + } + if (mp->tx_ring_skbs != 0) + printk("%s: Error on Tx descriptor free - could not free %d" + " descriptors\n", dev->name, + mp->tx_ring_skbs); + pci_free_consistent(0, mp->tx_desc_area_size, + (void *) mp->p_tx_desc_area, mp->tx_desc_dma); +} + +static void mv64340_eth_free_rx_rings(struct net_device *dev) +{ + struct mv64340_private *mp = netdev_priv(dev); + unsigned int port_num = mp->port_num; + int curr; + + /* Stop RX Queues */ + MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), + 0x0000ff00); + + /* Free RX rings */ + /* Free preallocated skb's on RX rings */ + for (curr = 0; + mp->rx_ring_skbs && (curr < MV64340_RX_QUEUE_SIZE); + curr++) { + if (mp->rx_skb[curr]) { + dev_kfree_skb(mp->rx_skb[curr]); + mp->rx_ring_skbs--; + } + } + + if (mp->rx_ring_skbs != 0) + printk(KERN_ERR + "%s: Error in freeing Rx Ring. %d skb's still" + " stuck in RX Ring - ignoring them\n", dev->name, + mp->rx_ring_skbs); + pci_free_consistent(0, mp->rx_desc_area_size, + (void *) mp->p_rx_desc_area, + mp->rx_desc_dma); +} + +/* + * mv64340_eth_stop + * + * This function is used when closing the network device. 
+ * It updates the hardware, + * release all memory that holds buffers and descriptors and release the IRQ. + * Input : a pointer to the device structure + * Output : zero if success , nonzero if fails + */ + +/* Helper function for mv64340_eth_stop */ + +static int mv64340_eth_real_stop(struct net_device *dev) +{ + struct mv64340_private *mp = netdev_priv(dev); + unsigned int port_num = mp->port_num; + + netif_stop_queue(dev); + + mv64340_eth_free_tx_rings(dev); + mv64340_eth_free_rx_rings(dev); + + eth_port_reset(mp->port_num); + + /* Disable ethernet port interrupts */ + MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num), 0); + MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); + + /* Mask RX buffer and TX end interrupt */ + MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num), 0); + + /* Mask phy and link status changes interrupts */ + MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0); + + return 0; +} + +static int mv64340_eth_stop(struct net_device *dev) +{ + struct mv64340_private *mp = netdev_priv(dev); + + spin_lock_irq(&mp->lock); + + mv64340_eth_real_stop(dev); + + free_irq(dev->irq, dev); + spin_unlock_irq(&mp->lock); + + return 0; +} + +#ifdef MV64340_NAPI +static void mv64340_tx(struct net_device *dev) +{ + struct mv64340_private *mp = netdev_priv(dev); + struct pkt_info pkt_info; + + while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { + if (pkt_info.return_info) { + dev_kfree_skb_irq((struct sk_buff *) + pkt_info.return_info); + if (skb_shinfo(pkt_info.return_info)->nr_frags) + pci_unmap_page(NULL, pkt_info.buf_ptr, + pkt_info.byte_cnt, + PCI_DMA_TODEVICE); + + if (mp->tx_ring_skbs != 1) + mp->tx_ring_skbs--; + } else + pci_unmap_page(NULL, pkt_info.buf_ptr, pkt_info.byte_cnt, + PCI_DMA_TODEVICE); + } + + if (netif_queue_stopped(dev) && + MV64340_TX_QUEUE_SIZE > mp->tx_ring_skbs + 1) + netif_wake_queue(dev); +} + +/* + * mv64340_poll + * + * This function is used in case of NAPI + */ +static int mv64340_poll(struct net_device *dev, int *budget) +{ + struct mv64340_private *mp = netdev_priv(dev); + int done = 1, orig_budget, work_done; + unsigned int port_num = mp->port_num; + unsigned long flags; + +#ifdef MV64340_TX_FAST_REFILL + if (++mp->tx_clean_threshold > 5) { + spin_lock_irqsave(&mp->lock, flags); + mv64340_tx(dev); + mp->tx_clean_threshold = 0; + spin_unlock_irqrestore(&mp->lock, flags); + } +#endif + + if ((u32)(MV_READ(MV64340_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) != (u32)mp->rx_used_desc_q) { + orig_budget = *budget; + if (orig_budget > dev->quota) + orig_budget = dev->quota; + work_done = mv64340_eth_receive_queue(dev, 0, orig_budget); + mp->rx_task.func(dev); + *budget -= work_done; + dev->quota -= work_done; + if (work_done >= orig_budget) + done = 0; + } + + if (done) { + spin_lock_irqsave(&mp->lock, flags); + __netif_rx_complete(dev); + MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num),0); + MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),0); + MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num), + INT_CAUSE_UNMASK_ALL); + MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), + INT_CAUSE_UNMASK_ALL_EXT); + spin_unlock_irqrestore(&mp->lock, flags); + } + + return done ? 0 : 1; +} +#endif + +/* + * mv64340_eth_start_xmit + * + * This function is queues a packet in the Tx descriptor for + * required port. 
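The hard-error check at the top of mv64340_eth_start_xmit() below requires one free descriptor per fragment plus one for the header before accepting a packet. A sketch of that headroom test (the queue size is a stand-in for MV64340_TX_QUEUE_SIZE):

#include <stdio.h>

#define TX_QUEUE_SIZE 128	/* stand-in value */

static int tx_has_room(int in_flight, int nr_frags)
{
	return (TX_QUEUE_SIZE - in_flight) > (nr_frags + 1);
}

int main(void)
{
	printf("120 in flight, 3 frags: %d\n", tx_has_room(120, 3));	/* 1 */
	printf("126 in flight, 3 frags: %d\n", tx_has_room(126, 3));	/* 0 */
	return 0;
}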
+ * + * Input : skb - a pointer to socket buffer + * dev - a pointer to the required port + * + * Output : zero upon success + */ +static int mv64340_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mv64340_private *mp = netdev_priv(dev); + struct net_device_stats *stats = &mp->stats; + ETH_FUNC_RET_STATUS status; + unsigned long flags; + struct pkt_info pkt_info; + + if (netif_queue_stopped(dev)) { + printk(KERN_ERR + "%s: Tried sending packet when interface is stopped\n", + dev->name); + return 1; + } + + /* This is a hard error, log it. */ + if ((MV64340_TX_QUEUE_SIZE - mp->tx_ring_skbs) <= + (skb_shinfo(skb)->nr_frags + 1)) { + netif_stop_queue(dev); + printk(KERN_ERR + "%s: Bug in mv64340_eth - Trying to transmit when" + " queue full !\n", dev->name); + return 1; + } + + /* Paranoid check - this shouldn't happen */ + if (skb == NULL) { + stats->tx_dropped++; + return 1; + } + + spin_lock_irqsave(&mp->lock, flags); + + /* Update packet info data structure -- DMA owned, first last */ +#ifdef MV64340_CHECKSUM_OFFLOAD_TX + if (!skb_shinfo(skb)->nr_frags || (skb_shinfo(skb)->nr_frags > 3)) { +#endif + pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | + ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC; + + pkt_info.byte_cnt = skb->len; + pkt_info.buf_ptr = pci_map_single(0, skb->data, skb->len, + PCI_DMA_TODEVICE); + + + pkt_info.return_info = skb; + status = eth_port_send(mp, &pkt_info); + if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) + printk(KERN_ERR "%s: Error on transmitting packet\n", + dev->name); + mp->tx_ring_skbs++; +#ifdef MV64340_CHECKSUM_OFFLOAD_TX + } else { + unsigned int frag; + u32 ipheader; + + /* first frag which is skb header */ + pkt_info.byte_cnt = skb_headlen(skb); + pkt_info.buf_ptr = pci_map_single(0, skb->data, + skb_headlen(skb), PCI_DMA_TODEVICE); + pkt_info.return_info = 0; + ipheader = skb->nh.iph->ihl << 11; + pkt_info.cmd_sts = ETH_TX_FIRST_DESC | + ETH_GEN_TCP_UDP_CHECKSUM | + ETH_GEN_IP_V_4_CHECKSUM | + ipheader; + /* CPU already calculated pseudo header checksum. So, use it */ + pkt_info.l4i_chk = skb->h.th->check; + status = eth_port_send(mp, &pkt_info); + if (status != ETH_OK) { + if ((status == ETH_ERROR)) + printk(KERN_ERR "%s: Error on transmitting packet\n", dev->name); + if (status == ETH_QUEUE_FULL) + printk("Error on Queue Full \n"); + if (status == ETH_QUEUE_LAST_RESOURCE) + printk("Tx resource error \n"); + } + + /* Check for the remaining frags */ + for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { + skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; + pkt_info.l4i_chk = 0x0000; + pkt_info.cmd_sts = 0x00000000; + + /* Last Frag enables interrupt and frees the skb */ + if (frag == (skb_shinfo(skb)->nr_frags - 1)) { + pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT | + ETH_TX_LAST_DESC; + pkt_info.return_info = skb; + mp->tx_ring_skbs++; + } + else { + pkt_info.return_info = 0; + } + pkt_info.byte_cnt = this_frag->size; + if (this_frag->size < 8) + printk("%d : \n", skb_shinfo(skb)->nr_frags); + + pkt_info.buf_ptr = pci_map_page(NULL, this_frag->page, + this_frag->page_offset, + this_frag->size, PCI_DMA_TODEVICE); + + status = eth_port_send(mp, &pkt_info); + + if (status != ETH_OK) { + if ((status == ETH_ERROR)) + printk(KERN_ERR "%s: Error on transmitting packet\n", dev->name); + + if (status == ETH_QUEUE_LAST_RESOURCE) + printk("Tx resource error \n"); + + if (status == ETH_QUEUE_FULL) + printk("Queue is full \n"); + } + } + } +#endif + + /* Check if TX queue can handle another skb. 
+	 * signal higher layers to stop requesting Tx.
+	 */
+	if (MV64340_TX_QUEUE_SIZE <= (mp->tx_ring_skbs + 1))
+		/*
+		 * Stop getting skb's from upper layers.
+		 * Getting skb's from upper layers will be enabled again after
+		 * packets are released.
+		 */
+		netif_stop_queue(dev);
+
+	/* Update statistics and start of transmission time */
+	stats->tx_bytes += skb->len;
+	stats->tx_packets++;
+	dev->trans_start = jiffies;
+
+	spin_unlock_irqrestore(&mp->lock, flags);
+
+	return 0;		/* success */
+}
+
+/*
+ * mv64340_eth_get_stats
+ *
+ * Returns a pointer to the interface statistics.
+ *
+ * Input : dev - a pointer to the required interface
+ *
+ * Output : a pointer to the interface's statistics
+ */
+
+static struct net_device_stats *mv64340_eth_get_stats(struct net_device *dev)
+{
+	struct mv64340_private *mp = netdev_priv(dev);
+
+	return &mp->stats;
+}
+
+/*
+ * mv64340_eth_init
+ *
+ * First function called after registering the network device.
+ * Its purpose is to initialize the device as an ethernet device,
+ * fill the structure that was given in registration with pointers
+ * to functions, and set the MAC address of the interface.
+ *
+ * Input : number of the port to initialize
+ * Output : a pointer to the new net_device, NULL on failure
+ */
+static struct net_device *mv64340_eth_init(int port_num)
+{
+	struct mv64340_private *mp;
+	struct net_device *dev;
+	int err;
+
+	dev = alloc_etherdev(sizeof(struct mv64340_private));
+	if (!dev)
+		return NULL;
+
+	mp = netdev_priv(dev);
+
+	dev->irq = ETH_PORT0_IRQ_NUM + port_num;
+
+	dev->open = mv64340_eth_open;
+	dev->stop = mv64340_eth_stop;
+	dev->hard_start_xmit = mv64340_eth_start_xmit;
+	dev->get_stats = mv64340_eth_get_stats;
+	dev->set_mac_address = mv64340_eth_set_mac_address;
+	dev->set_multicast_list = mv64340_eth_set_rx_mode;
+
+	/* No need to Tx Timeout */
+	dev->tx_timeout = mv64340_eth_tx_timeout;
+#ifdef MV64340_NAPI
+	dev->poll = mv64340_poll;
+	dev->weight = 64;
+#endif
+
+	dev->watchdog_timeo = 2 * HZ;
+	dev->tx_queue_len = MV64340_TX_QUEUE_SIZE;
+	dev->base_addr = 0;
+	dev->change_mtu = mv64340_eth_change_mtu;
+
+#ifdef MV64340_CHECKSUM_OFFLOAD_TX
+#ifdef MAX_SKB_FRAGS
+#ifndef CONFIG_JAGUAR_DMALOW
+	/*
+	 * Zero copy can only work if we use Discovery II memory.
Else, we will + * have to map the buffers to ISA memory which is only 16 MB + */ + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM; +#endif +#endif +#endif + + mp->port_num = port_num; + + /* Configure the timeout task */ + INIT_WORK(&mp->tx_timeout_task, + (void (*)(void *))mv64340_eth_tx_timeout_task, dev); + + spin_lock_init(&mp->lock); + + /* set MAC addresses */ + memcpy(dev->dev_addr, prom_mac_addr_base, 6); + dev->dev_addr[5] += port_num; + + err = register_netdev(dev); + if (err) + goto out_free_dev; + + printk(KERN_NOTICE "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n", + dev->name, port_num, + dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], + dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + + if (dev->features & NETIF_F_SG) + printk("Scatter Gather Enabled "); + + if (dev->features & NETIF_F_IP_CSUM) + printk("TX TCP/IP Checksumming Supported \n"); + + printk("RX TCP/UDP Checksum Offload ON, \n"); + printk("TX and RX Interrupt Coalescing ON \n"); + +#ifdef MV64340_NAPI + printk("RX NAPI Enabled \n"); +#endif + + return dev; + +out_free_dev: + free_netdev(dev); + + return NULL; +} + +static void mv64340_eth_remove(struct net_device *dev) +{ + struct mv64340_private *mp = netdev_priv(dev); + + unregister_netdev(dev); + flush_scheduled_work(); + free_netdev(dev); +} + +static struct net_device *mv64340_dev0; +static struct net_device *mv64340_dev1; +static struct net_device *mv64340_dev2; + +/* + * mv64340_init_module + * + * Registers the network drivers into the Linux kernel + * + * Input : N/A + * + * Output : N/A + */ +static int __init mv64340_init_module(void) +{ + printk(KERN_NOTICE "MV-64340 10/100/1000 Ethernet Driver\n"); + +#ifdef CONFIG_MV64340_ETH_0 + mv64340_dev0 = mv64340_eth_init(0); + if (!mv64340_dev0) { + printk(KERN_ERR + "Error registering MV-64360 ethernet port 0\n"); + } +#endif +#ifdef CONFIG_MV64340_ETH_1 + mv64340_dev1 = mv64340_eth_init(1); + if (!mv64340_dev1) { + printk(KERN_ERR + "Error registering MV-64360 ethernet port 1\n"); + } +#endif +#ifdef CONFIG_MV64340_ETH_2 + mv64340_dev2 = mv64340_eth_init(2); + if (!mv64340_dev2) { + printk(KERN_ERR + "Error registering MV-64360 ethernet port 2\n"); + } +#endif + return 0; +} + +/* + * mv64340_cleanup_module + * + * Registers the network drivers into the Linux kernel + * + * Input : N/A + * + * Output : N/A + */ +static void __exit mv64340_cleanup_module(void) +{ + if (mv64340_dev2) + mv64340_eth_remove(mv64340_dev2); + if (mv64340_dev1) + mv64340_eth_remove(mv64340_dev1); + if (mv64340_dev0) + mv64340_eth_remove(mv64340_dev0); +} + +module_init(mv64340_init_module); +module_exit(mv64340_cleanup_module); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm and Manish Lachwani"); +MODULE_DESCRIPTION("Ethernet driver for Marvell MV64340"); + +/* + * The second part is the low level driver of the gigE ethernet ports. + */ + +/* + * Marvell's Gigabit Ethernet controller low level driver + * + * DESCRIPTION: + * This file introduce low level API to Marvell's Gigabit Ethernet + * controller. This Gigabit Ethernet Controller driver API controls + * 1) Operations (i.e. port init, start, reset etc'). + * 2) Data flow (i.e. port send, receive etc'). + * Each Gigabit Ethernet port is controlled via + * struct mv64340_private. + * This struct includes user configuration information as well as + * driver internal data needed for its operations. + * + * Supported Features: + * - This low level driver is OS independent. 
Allocating memory for
+ * the descriptor rings and buffers is not within the scope of
+ * this driver.
+ * - The user is freed from Rx/Tx queue management.
+ * - This low level driver introduces a functional API that enables
+ * the user to operate Marvell's Gigabit Ethernet Controller in a
+ * convenient way.
+ * - Simple Gigabit Ethernet port operation API.
+ * - Simple Gigabit Ethernet port data flow API.
+ * - The data flow and operation APIs support per-queue functionality.
+ * - Supports cached descriptors for better performance.
+ * - Enables access to all four DRAM banks and internal SRAM memory
+ * spaces.
+ * - PHY access and control API.
+ * - Port control register configuration API.
+ * - Full control over Unicast and Multicast MAC configurations.
+ *
+ * Operation flow:
+ *
+ * Initialization phase
+ * This phase completes the initialization of the mv64340_private
+ * struct.
+ * User information regarding port configuration has to be set
+ * prior to calling the port initialization routine.
+ *
+ * In this phase any port Tx/Rx activity is halted, MIB counters
+ * are cleared, the PHY address is set according to the user
+ * parameter, and access to the DRAM and internal SRAM memory
+ * spaces is enabled.
+ *
+ * Driver ring initialization
+ * Allocating memory for the descriptor rings and buffers is not
+ * within the scope of this driver. Thus, the user is required to
+ * allocate memory for the descriptor rings and buffers. Those
+ * memory parameters are used by the Rx and Tx ring initialization
+ * routines in order to form the descriptor linked list into a ring.
+ * Note: Pay special attention to alignment issues when using
+ * cached descriptors/buffers. In this phase the driver stores
+ * information in the mv64340_private struct regarding each queue
+ * ring.
+ *
+ * Driver start
+ * This phase prepares the Ethernet port for Rx and Tx activity.
+ * It uses the information stored in the mv64340_private struct to
+ * initialize the various port registers.
+ *
+ * Data flow:
+ * All packet references to/from the driver are done using
+ * struct pkt_info.
+ * This struct is a unified struct used with Rx and Tx operations.
+ * This way the user is not required to be familiar with either
+ * the Tx or the Rx descriptor structure.
+ * The driver's descriptor rings are managed by indexes.
+ * Those indexes control the ring resources and are used to indicate
+ * a SW resource error:
+ * 'current'
+ * This index points to the current available resource for use. For
+ * example in the Rx process this index will point to the descriptor
+ * that will be passed to the user upon calling the receive routine.
+ * In the Tx process, this index will point to the descriptor
+ * that will be assigned with the user packet info and transmitted.
+ * 'used'
+ * This index points to the descriptor that needs to restore its
+ * resources. For example in the Rx process, using the Rx buffer return
+ * API will attach the buffer returned in packet info to the
+ * descriptor pointed to by 'used'. In the Tx process, using the Tx
+ * descriptor return will merely return the user packet info with
+ * the command status of the transmitted buffer pointed to by the
+ * 'used' index. Nevertheless, it is essential to use this routine
+ * to update the 'used' index.
+ * 'first'
+ * This index supports Tx Scatter-Gather. It points to the first
+ * descriptor of a packet assembled of multiple buffers.
For example,
+ * if a Tx resource error occurs while in the middle of such a packet,
+ * the 'curr' index gets the value of 'first' to indicate that the ring
+ * returned to its state before trying to transmit this packet.
+ *
+ * Receive operation:
+ * The eth_port_receive API sets the packet information struct,
+ * passed by the caller, with received information from the
+ * 'current' SDMA descriptor.
+ * It is the user's responsibility to return this resource back
+ * to the Rx descriptor ring to enable the reuse of this resource.
+ * Returning an Rx resource is done using the eth_rx_return_buff API.
+ *
+ * Transmit operation:
+ * The eth_port_send API supports Scatter-Gather, which enables
+ * sending a packet spanned over multiple buffers. This means that
+ * each packet info structure given by the user and put into the
+ * Tx descriptors ring will be transmitted only if the 'LAST' bit
+ * is set in the packet info command status field. This API also
+ * considers restrictions regarding buffer alignments and sizes.
+ * The user must return a Tx resource after ensuring the buffer
+ * has been transmitted to enable the Tx ring indexes to update.
+ *
+ * BOARD LAYOUT
+ * This device is on-board. No jumper diagram is necessary.
+ *
+ * EXTERNAL INTERFACE
+ *
+ * Prior to calling the initialization routine eth_port_init() the user
+ * must set the following fields under mv64340_private struct:
+ * port_num		User Ethernet port number.
+ * port_mac_addr[6]	User defined port MAC address.
+ * port_config		User port configuration value.
+ * port_config_extend	User port config extend value.
+ * port_sdma_config	User port SDMA config value.
+ * port_serial_control	User port serial control value.
+ *
+ * This driver introduces a set of default values:
+ * PORT_CONFIG_VALUE		Default port configuration value
+ * PORT_CONFIG_EXTEND_VALUE	Default port extend configuration value
+ * PORT_SDMA_CONFIG_VALUE	Default sdma control value
+ * PORT_SERIAL_CONTROL_VALUE	Default port serial control value
+ *
+ * The driver's data flow is done using struct pkt_info, which
+ * is a unified struct for Rx and Tx operations:
+ *
+ * byte_cnt	Tx/Rx descriptor buffer byte count.
+ * l4i_chk	CPU provided TCP Checksum. For Tx operation only.
+ * cmd_sts	Tx/Rx descriptor command status.
+ * buf_ptr	Tx/Rx descriptor buffer pointer.
+ * return_info	Tx/Rx user resource return information.
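+ *
+ * As an illustrative sketch only (new_skb, its DMA address new_buf_dma
+ * and its length new_buf_len are hypothetical refill values, and error
+ * handling is omitted), a caller-side Rx loop built from these
+ * primitives could look like:
+ *
+ *	struct pkt_info pkt_info;
+ *
+ *	while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
+ *		pass pkt_info.return_info (the skb) up the stack, then
+ *		refill the 'used' slot with a fresh buffer:
+ *		pkt_info.buf_ptr = new_buf_dma;
+ *		pkt_info.byte_cnt = new_buf_len;
+ *		pkt_info.return_info = new_skb;
+ *		eth_rx_return_buff(mp, &pkt_info);
+ *	}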
+ */ + +/* defines */ +/* SDMA command macros */ +#define ETH_ENABLE_TX_QUEUE(eth_port) \ + MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1) + +#define ETH_DISABLE_TX_QUEUE(eth_port) \ + MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), \ + (1 << 8)) + +#define ETH_ENABLE_RX_QUEUE(rx_queue, eth_port) \ + MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), \ + (1 << rx_queue)) + +#define ETH_DISABLE_RX_QUEUE(rx_queue, eth_port) \ + MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), \ + (1 << (8 + rx_queue))) + +#define LINK_UP_TIMEOUT 100000 +#define PHY_BUSY_TIMEOUT 10000000 + +/* locals */ + +/* PHY routines */ +#ifdef MDD_CUT +static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); +#endif +static int ethernet_phy_get(unsigned int eth_port_num); + +/* Ethernet Port routines */ +static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble, + int option); + +#ifdef MDD_CUT +static void eth_b_copy(unsigned int src_addr, unsigned int dst_addr, + int byte_count); +#endif + +/* + * eth_port_init - Initialize the Ethernet port driver + * + * DESCRIPTION: + * This function prepares the ethernet port to start its activity: + * 1) Completes the ethernet port driver struct initialization toward port + * start routine. + * 2) Resets the device to a quiescent state in case of warm reboot. + * 3) Enable SDMA access to all four DRAM banks as well as internal SRAM. + * 4) Clean MAC tables. The reset status of those tables is unknown. + * 5) Set PHY address. + * Note: Call this routine prior to eth_port_start routine and after + * setting user values in the user fields of Ethernet port control + * struct. + * + * INPUT: + * struct mv64340_private *mp Ethernet port control struct + * + * OUTPUT: + * See description. + * + * RETURN: + * None. + */ +static void eth_port_init(struct mv64340_private * mp) +{ + mp->port_config = PORT_CONFIG_VALUE; + mp->port_config_extend = PORT_CONFIG_EXTEND_VALUE; +#if defined(__BIG_ENDIAN) + mp->port_sdma_config = PORT_SDMA_CONFIG_VALUE; +#elif defined(__LITTLE_ENDIAN) + mp->port_sdma_config = PORT_SDMA_CONFIG_VALUE | + ETH_BLM_RX_NO_SWAP | ETH_BLM_TX_NO_SWAP; +#else +#error One of __LITTLE_ENDIAN or __BIG_ENDIAN must be defined! +#endif + mp->port_serial_control = PORT_SERIAL_CONTROL_VALUE; + + mp->port_rx_queue_command = 0; + mp->port_tx_queue_command = 0; + + mp->rx_resource_err = 0; + mp->tx_resource_err = 0; + + eth_port_reset(mp->port_num); + + eth_port_init_mac_tables(mp->port_num); + + ethernet_phy_reset(mp->port_num); +} + +/* + * eth_port_start - Start the Ethernet port activity. + * + * DESCRIPTION: + * This routine prepares the Ethernet port for Rx and Tx activity: + * 1. Initialize Tx and Rx Current Descriptor Pointer for each queue that + * has been initialized a descriptor's ring (using + * ether_init_tx_desc_ring for Tx and ether_init_rx_desc_ring for Rx) + * 2. Initialize and enable the Ethernet configuration port by writing to + * the port's configuration and command registers. + * 3. Initialize and enable the SDMA by writing to the SDMA's + * configuration and command registers. After completing these steps, + * the ethernet port SDMA can starts to perform Rx and Tx activities. + * + * Note: Each Rx and Tx queue descriptor's list must be initialized prior + * to calling this function (use ether_init_tx_desc_ring for Tx queues + * and ether_init_rx_desc_ring for Rx queues). 
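+ *
+ * A sketch of the call order implied by the Note above (arguments
+ * elided, hence not compilable as written):
+ *
+ *	eth_port_init(mp);
+ *	ether_init_tx_desc_ring(...);	for each Tx queue in use
+ *	ether_init_rx_desc_ring(...);	for each Rx queue in use
+ *	eth_port_start(mp);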
+ * + * INPUT: + * struct mv64340_private *mp Ethernet port control struct + * + * OUTPUT: + * Ethernet port is ready to receive and transmit. + * + * RETURN: + * false if the port PHY is not up. + * true otherwise. + */ +static int eth_port_start(struct mv64340_private *mp) +{ + unsigned int eth_port_num = mp->port_num; + int tx_curr_desc, rx_curr_desc; + unsigned int phy_reg_data; + + /* Assignment of Tx CTRP of given queue */ + tx_curr_desc = mp->tx_curr_desc_q; + MV_WRITE(MV64340_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(eth_port_num), + (struct eth_tx_desc *) mp->tx_desc_dma + tx_curr_desc); + + /* Assignment of Rx CRDP of given queue */ + rx_curr_desc = mp->rx_curr_desc_q; + MV_WRITE(MV64340_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(eth_port_num), + (struct eth_rx_desc *) mp->rx_desc_dma + rx_curr_desc); + + /* Add the assigned Ethernet address to the port's address table */ + eth_port_uc_addr_set(mp->port_num, mp->port_mac_addr); + + /* Assign port configuration and command. */ + MV_WRITE(MV64340_ETH_PORT_CONFIG_REG(eth_port_num), + mp->port_config); + + MV_WRITE(MV64340_ETH_PORT_CONFIG_EXTEND_REG(eth_port_num), + mp->port_config_extend); + + MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num), + mp->port_serial_control); + + MV_SET_REG_BITS(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num), + ETH_SERIAL_PORT_ENABLE); + + /* Assign port SDMA configuration */ + MV_WRITE(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num), + mp->port_sdma_config); + + /* Enable port Rx. */ + MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port_num), + mp->port_rx_queue_command); + + /* Check if link is up */ + eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data); + + if (!(phy_reg_data & 0x20)) + return 0; + + return 1; +} + +/* + * eth_port_uc_addr_set - This function Set the port Unicast address. + * + * DESCRIPTION: + * This function Set the port Ethernet MAC address. + * + * INPUT: + * unsigned int eth_port_num Port number. + * char * p_addr Address to be set + * + * OUTPUT: + * Set MAC address low and high registers. also calls eth_port_uc_addr() + * To set the unicast table with the proper information. + * + * RETURN: + * N/A. + * + */ +static void eth_port_uc_addr_set(unsigned int eth_port_num, + unsigned char *p_addr) +{ + unsigned int mac_h; + unsigned int mac_l; + + mac_l = (p_addr[4] << 8) | (p_addr[5]); + mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | + (p_addr[2] << 8) | (p_addr[3] << 0); + + MV_WRITE(MV64340_ETH_MAC_ADDR_LOW(eth_port_num), mac_l); + MV_WRITE(MV64340_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h); + + /* Accept frames of this address */ + eth_port_uc_addr(eth_port_num, p_addr[5], ACCEPT_MAC_ADDR); + + return; +} + +/* + * eth_port_uc_addr - This function Set the port unicast address table + * + * DESCRIPTION: + * This function locates the proper entry in the Unicast table for the + * specified MAC nibble and sets its properties according to function + * parameters. + * + * INPUT: + * unsigned int eth_port_num Port number. + * unsigned char uc_nibble Unicast MAC Address last nibble. + * int option 0 = Add, 1 = remove address. + * + * OUTPUT: + * This function add/removes MAC addresses from the port unicast address + * table. + * + * RETURN: + * true is output succeeded. + * false if option parameter is invalid. 
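+ *
+ * For illustration: each 32-bit table register holds four one-byte
+ * entries, so nibble 0xB is served by the register at table offset
+ * (0xB / 4) * 4 = 8 and occupies byte lane 0xB % 4 = 3 within it.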
+ * + */ +static int eth_port_uc_addr(unsigned int eth_port_num, + unsigned char uc_nibble, int option) +{ + unsigned int unicast_reg; + unsigned int tbl_offset; + unsigned int reg_offset; + + /* Locate the Unicast table entry */ + uc_nibble = (0xf & uc_nibble); + tbl_offset = (uc_nibble / 4) * 4; /* Register offset from unicast table base */ + reg_offset = uc_nibble % 4; /* Entry offset within the above register */ + + switch (option) { + case REJECT_MAC_ADDR: + /* Clear accepts frame bit at specified unicast DA table entry */ + unicast_reg = MV_READ((MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE + (eth_port_num) + tbl_offset)); + + unicast_reg &= (0x0E << (8 * reg_offset)); + + MV_WRITE( + (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE + (eth_port_num) + tbl_offset), unicast_reg); + break; + + case ACCEPT_MAC_ADDR: + /* Set accepts frame bit at unicast DA filter table entry */ + unicast_reg = + MV_READ( + (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE + (eth_port_num) + tbl_offset)); + + unicast_reg |= (0x01 << (8 * reg_offset)); + + MV_WRITE( + (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE + (eth_port_num) + tbl_offset), unicast_reg); + + break; + + default: + return 0; + } + + return 1; +} + +/* + * eth_port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables + * + * DESCRIPTION: + * Go through all the DA filter tables (Unicast, Special Multicast & + * Other Multicast) and set each entry to 0. + * + * INPUT: + * unsigned int eth_port_num Ethernet Port number. + * + * OUTPUT: + * Multicast and Unicast packets are rejected. + * + * RETURN: + * None. + */ +static void eth_port_init_mac_tables(unsigned int eth_port_num) +{ + int table_index; + + /* Clear DA filter unicast table (Ex_dFUT) */ + for (table_index = 0; table_index <= 0xC; table_index += 4) + MV_WRITE( + (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE + (eth_port_num) + table_index), 0); + + for (table_index = 0; table_index <= 0xFC; table_index += 4) { + /* Clear DA filter special multicast table (Ex_dFSMT) */ + MV_WRITE( + (MV64340_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE + (eth_port_num) + table_index), 0); + /* Clear DA filter other multicast table (Ex_dFOMT) */ + MV_WRITE((MV64340_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE + (eth_port_num) + table_index), 0); + } +} + +/* + * eth_clear_mib_counters - Clear all MIB counters + * + * DESCRIPTION: + * This function clears all MIB counters of a specific ethernet port. + * A read from the MIB counter will reset the counter. + * + * INPUT: + * unsigned int eth_port_num Ethernet Port number. + * + * OUTPUT: + * After reading all MIB counters, the counters resets. + * + * RETURN: + * MIB counter value. + * + */ +static void eth_clear_mib_counters(unsigned int eth_port_num) +{ + int i; + + /* Perform dummy reads from MIB counters */ + for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION; i += 4) + MV_READ(MV64340_ETH_MIB_COUNTERS_BASE(eth_port_num) + i); +} + + +#ifdef MDD_CUT +/* + * ethernet_phy_set - Set the ethernet port PHY address. + * + * DESCRIPTION: + * This routine set the ethernet port PHY address according to given + * parameter. + * + * INPUT: + * unsigned int eth_port_num Ethernet Port number. + * + * OUTPUT: + * Set PHY Address Register with given PHY address parameter. + * + * RETURN: + * None. 
+ */ +static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr) +{ + unsigned int reg_data; + + reg_data = MV_READ(MV64340_ETH_PHY_ADDR_REG); + + reg_data &= ~(0x1F << (5 * eth_port_num)); + reg_data |= (phy_addr << (5 * eth_port_num)); + + MV_WRITE(MV64340_ETH_PHY_ADDR_REG, reg_data); + + return; +} +#endif + +/* + * ethernet_phy_get - Get the ethernet port PHY address. + * + * DESCRIPTION: + * This routine returns the given ethernet port PHY address. + * + * INPUT: + * unsigned int eth_port_num Ethernet Port number. + * + * OUTPUT: + * None. + * + * RETURN: + * PHY address. + * + */ +static int ethernet_phy_get(unsigned int eth_port_num) +{ + unsigned int reg_data; + + reg_data = MV_READ(MV64340_ETH_PHY_ADDR_REG); + + return ((reg_data >> (5 * eth_port_num)) & 0x1f); +} + +/* + * ethernet_phy_reset - Reset Ethernet port PHY. + * + * DESCRIPTION: + * This routine utilize the SMI interface to reset the ethernet port PHY. + * The routine waits until the link is up again or link up is timeout. + * + * INPUT: + * unsigned int eth_port_num Ethernet Port number. + * + * OUTPUT: + * The ethernet port PHY renew its link. + * + * RETURN: + * None. + * + */ +static int ethernet_phy_reset(unsigned int eth_port_num) +{ + unsigned int time_out = 50; + unsigned int phy_reg_data; + + /* Reset the PHY */ + eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); + phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ + eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data); + + /* Poll on the PHY LINK */ + do { + eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data); + + if (time_out-- == 0) + return 0; + } while (!(phy_reg_data & 0x20)); + + return 1; +} + +/* + * eth_port_reset - Reset Ethernet port + * + * DESCRIPTION: + * This routine resets the chip by aborting any SDMA engine activity and + * clearing the MIB counters. The Receiver and the Transmit unit are in + * idle state after this command is performed and the port is disabled. + * + * INPUT: + * unsigned int eth_port_num Ethernet Port number. + * + * OUTPUT: + * Channel activity is halted. + * + * RETURN: + * None. + * + */ +static void eth_port_reset(unsigned int eth_port_num) +{ + unsigned int reg_data; + + /* Stop Tx port activity. Check port Tx activity. */ + reg_data = + MV_READ(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port_num)); + + if (reg_data & 0xFF) { + /* Issue stop command for active channels only */ + MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG + (eth_port_num), (reg_data << 8)); + + /* Wait for all Tx activity to terminate. */ + do { + /* Check port cause register that all Tx queues are stopped */ + reg_data = + MV_READ + (MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG + (eth_port_num)); + } + while (reg_data & 0xFF); + } + + /* Stop Rx port activity. Check port Rx activity. */ + reg_data = + MV_READ(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG + (eth_port_num)); + + if (reg_data & 0xFF) { + /* Issue stop command for active channels only */ + MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG + (eth_port_num), (reg_data << 8)); + + /* Wait for all Rx activity to terminate. 
*/ + do { + /* Check port cause register that all Rx queues are stopped */ + reg_data = + MV_READ + (MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG + (eth_port_num)); + } + while (reg_data & 0xFF); + } + + + /* Clear all MIB counters */ + eth_clear_mib_counters(eth_port_num); + + /* Reset the Enable bit in the Configuration Register */ + reg_data = + MV_READ(MV64340_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num)); + reg_data &= ~ETH_SERIAL_PORT_ENABLE; + MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num), reg_data); + + return; +} + +/* + * ethernet_set_config_reg - Set specified bits in configuration register. + * + * DESCRIPTION: + * This function sets specified bits in the given ethernet + * configuration register. + * + * INPUT: + * unsigned int eth_port_num Ethernet Port number. + * unsigned int value 32 bit value. + * + * OUTPUT: + * The set bits in the value parameter are set in the configuration + * register. + * + * RETURN: + * None. + * + */ +static void ethernet_set_config_reg(unsigned int eth_port_num, + unsigned int value) +{ + unsigned int eth_config_reg; + + eth_config_reg = + MV_READ(MV64340_ETH_PORT_CONFIG_REG(eth_port_num)); + eth_config_reg |= value; + MV_WRITE(MV64340_ETH_PORT_CONFIG_REG(eth_port_num), + eth_config_reg); +} + +/* + * ethernet_get_config_reg - Get the port configuration register + * + * DESCRIPTION: + * This function returns the configuration register value of the given + * ethernet port. + * + * INPUT: + * unsigned int eth_port_num Ethernet Port number. + * + * OUTPUT: + * None. + * + * RETURN: + * Port configuration register value. + */ +static unsigned int ethernet_get_config_reg(unsigned int eth_port_num) +{ + unsigned int eth_config_reg; + + eth_config_reg = MV_READ(MV64340_ETH_PORT_CONFIG_EXTEND_REG + (eth_port_num)); + return eth_config_reg; +} + + +/* + * eth_port_read_smi_reg - Read PHY registers + * + * DESCRIPTION: + * This routine utilize the SMI interface to interact with the PHY in + * order to perform PHY register read. + * + * INPUT: + * unsigned int eth_port_num Ethernet Port number. + * unsigned int phy_reg PHY register address offset. + * unsigned int *value Register value buffer. + * + * OUTPUT: + * Write the value of a specified PHY register into given buffer. + * + * RETURN: + * false if the PHY is busy or read data is not in valid state. + * true otherwise. + * + */ +static int eth_port_read_smi_reg(unsigned int eth_port_num, + unsigned int phy_reg, unsigned int *value) +{ + int phy_addr = ethernet_phy_get(eth_port_num); + unsigned int time_out = PHY_BUSY_TIMEOUT; + unsigned int reg_value; + + /* first check that it is not busy */ + do { + reg_value = MV_READ(MV64340_ETH_SMI_REG); + if (time_out-- == 0) + return 0; + } while (reg_value & ETH_SMI_BUSY); + + /* not busy */ + + MV_WRITE(MV64340_ETH_SMI_REG, + (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); + + time_out = PHY_BUSY_TIMEOUT; /* initialize the time out var again */ + + do { + reg_value = MV_READ(MV64340_ETH_SMI_REG); + if (time_out-- == 0) + return 0; + } while (reg_value & ETH_SMI_READ_VALID); + + /* Wait for the data to update in the SMI register */ + for (time_out = 0; time_out < PHY_BUSY_TIMEOUT; time_out++); + + reg_value = MV_READ(MV64340_ETH_SMI_REG); + + *value = reg_value & 0xffff; + + return 1; +} + +/* + * eth_port_write_smi_reg - Write to PHY registers + * + * DESCRIPTION: + * This routine utilize the SMI interface to interact with the PHY in + * order to perform writes to PHY registers. + * + * INPUT: + * unsigned int eth_port_num Ethernet Port number. 
+ * unsigned int	phy_reg		PHY register address offset.
+ * unsigned int	value		Register value.
+ *
+ * OUTPUT:
+ * Write the given value to the specified PHY register.
+ *
+ * RETURN:
+ * false if the PHY is busy.
+ * true otherwise.
+ *
+ */
+static int eth_port_write_smi_reg(unsigned int eth_port_num,
+				  unsigned int phy_reg, unsigned int value)
+{
+	unsigned int time_out = PHY_BUSY_TIMEOUT;
+	unsigned int reg_value;
+	int phy_addr;
+
+	phy_addr = ethernet_phy_get(eth_port_num);
+
+	/* first check that it is not busy */
+	do {
+		reg_value = MV_READ(MV64340_ETH_SMI_REG);
+		if (time_out-- == 0)
+			return 0;
+	} while (reg_value & ETH_SMI_BUSY);
+
+	/* not busy */
+	MV_WRITE(MV64340_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
+		 ETH_SMI_OPCODE_WRITE | (value & 0xffff));
+
+	return 1;
+}
+
+/*
+ * eth_port_send - Send an Ethernet packet
+ *
+ * DESCRIPTION:
+ * This routine sends a given packet described by the p_pkt_info
+ * parameter. It supports transmitting a packet spanned over multiple
+ * buffers. The routine updates the 'curr' and 'first' indexes
+ * according to the packet segment passed to the routine. In case the
+ * packet segment is first, the 'first' index is updated. In any case,
+ * the 'curr' index is updated.
+ * If the routine gets into a Tx resource error it assigns the 'curr'
+ * index as 'first'. This way the function can abort the Tx process of
+ * multiple descriptors per packet.
+ *
+ * INPUT:
+ * struct mv64340_private *mp	Ethernet Port Control struct.
+ * struct pkt_info *p_pkt_info	User packet buffer.
+ *
+ * OUTPUT:
+ * Tx ring 'curr' and 'first' indexes are updated.
+ *
+ * RETURN:
+ * ETH_QUEUE_FULL in case of Tx resource error.
+ * ETH_ERROR in case the routine can not access the Tx desc ring.
+ * ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource.
+ * ETH_OK otherwise.
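+ *
+ * Typical caller-side handling of the return status, as done by
+ * mv64340_eth_start_xmit above (sketch only; pkt_info is assumed
+ * to be filled in already):
+ *
+ *	status = eth_port_send(mp, &pkt_info);
+ *	if (status == ETH_ERROR || status == ETH_QUEUE_FULL)
+ *		log the error;
+ *	else if (status == ETH_QUEUE_LAST_RESOURCE)
+ *		stop feeding the ring until descriptors are returned;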
+ * + */ +#ifdef MV64340_CHECKSUM_OFFLOAD_TX +/* + * Modified to include the first descriptor pointer in case of SG + */ +static ETH_FUNC_RET_STATUS eth_port_send(struct mv64340_private * mp, + struct pkt_info * p_pkt_info) +{ + int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc; + volatile struct eth_tx_desc *current_descriptor; + volatile struct eth_tx_desc *first_descriptor; + u32 command_status, first_chip_ptr; + + /* Do not process Tx ring in case of Tx ring resource error */ + if (mp->tx_resource_err) + return ETH_QUEUE_FULL; + + /* Get the Tx Desc ring indexes */ + tx_desc_curr = mp->tx_curr_desc_q; + tx_desc_used = mp->tx_used_desc_q; + + current_descriptor = &mp->p_tx_desc_area[tx_desc_curr]; + if (current_descriptor == NULL) + return ETH_ERROR; + + tx_next_desc = (tx_desc_curr + 1) % MV64340_TX_QUEUE_SIZE; + command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC; + + if (command_status & ETH_TX_FIRST_DESC) { + tx_first_desc = tx_desc_curr; + mp->tx_first_desc_q = tx_first_desc; + + /* fill first descriptor */ + first_descriptor = &mp->p_tx_desc_area[tx_desc_curr]; + first_descriptor->l4i_chk = p_pkt_info->l4i_chk; + first_descriptor->cmd_sts = command_status; + first_descriptor->byte_cnt = p_pkt_info->byte_cnt; + first_descriptor->buf_ptr = p_pkt_info->buf_ptr; + first_descriptor->next_desc_ptr = + (struct eth_tx_desc *) mp->tx_desc_dma + tx_next_desc; + wmb(); + } else { + tx_first_desc = mp->tx_first_desc_q; + first_descriptor = &mp->p_tx_desc_area[tx_first_desc]; + if (first_descriptor == NULL) { + printk("First desc is NULL !!\n"); + return ETH_ERROR; + } + if (command_status & ETH_TX_LAST_DESC) + current_descriptor->next_desc_ptr = 0x00000000; + else { + command_status |= ETH_BUFFER_OWNED_BY_DMA; + current_descriptor->next_desc_ptr = + (struct eth_tx_desc *) mp->tx_desc_dma + tx_next_desc; + } + } + + if (p_pkt_info->byte_cnt < 8) { + printk(" < 8 problem \n"); + return ETH_ERROR; + } + + current_descriptor->buf_ptr = p_pkt_info->buf_ptr; + current_descriptor->byte_cnt = p_pkt_info->byte_cnt; + current_descriptor->l4i_chk = p_pkt_info->l4i_chk; + current_descriptor->cmd_sts = command_status; + + mp->tx_skb[tx_desc_curr] = (struct sk_buff*) p_pkt_info->return_info; + + wmb(); + + /* Set last desc with DMA ownership and interrupt enable. */ + if (command_status & ETH_TX_LAST_DESC) { + current_descriptor->cmd_sts = command_status | + ETH_TX_ENABLE_INTERRUPT | + ETH_BUFFER_OWNED_BY_DMA; + + if (!(command_status & ETH_TX_FIRST_DESC)) + first_descriptor->cmd_sts |= ETH_BUFFER_OWNED_BY_DMA; + wmb(); + + first_chip_ptr = MV_READ(MV64340_ETH_CURRENT_SERVED_TX_DESC_PTR(mp->port_num)); + + /* Apply send command */ + if (first_chip_ptr == 0x00000000) + MV_WRITE(MV64340_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(mp->port_num), (struct eth_tx_desc *) mp->tx_desc_dma + tx_first_desc); + + ETH_ENABLE_TX_QUEUE(mp->port_num); + + /* + * Finish Tx packet. Update first desc in case of Tx resource + * error */ + tx_first_desc = tx_next_desc; + mp->tx_first_desc_q = tx_first_desc; + } else { + if (! 
(command_status & ETH_TX_FIRST_DESC) ) { + current_descriptor->cmd_sts = command_status; + wmb(); + } + } + + /* Check for ring index overlap in the Tx desc ring */ + if (tx_next_desc == tx_desc_used) { + mp->tx_resource_err = 1; + mp->tx_curr_desc_q = tx_first_desc; + + return ETH_QUEUE_LAST_RESOURCE; + } + + mp->tx_curr_desc_q = tx_next_desc; + wmb(); + + return ETH_OK; +} +#else +static ETH_FUNC_RET_STATUS eth_port_send(struct mv64340_private * mp, + struct pkt_info * p_pkt_info) +{ + int tx_desc_curr; + int tx_desc_used; + volatile struct eth_tx_desc* current_descriptor; + unsigned int command_status; + + /* Do not process Tx ring in case of Tx ring resource error */ + if (mp->tx_resource_err) + return ETH_QUEUE_FULL; + + /* Get the Tx Desc ring indexes */ + tx_desc_curr = mp->tx_curr_desc_q; + tx_desc_used = mp->tx_used_desc_q; + current_descriptor = &mp->p_tx_desc_area[tx_desc_curr]; + + if (current_descriptor == NULL) + return ETH_ERROR; + + command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC; + +/* XXX Is this for real ?!?!? */ + /* Buffers with a payload smaller than 8 bytes must be aligned to a + * 64-bit boundary. We use the memory allocated for Tx descriptor. + * This memory is located in TX_BUF_OFFSET_IN_DESC offset within the + * Tx descriptor. */ + if (p_pkt_info->byte_cnt <= 8) { + printk(KERN_ERR + "You have failed in the < 8 bytes errata - fixme\n"); + return ETH_ERROR; + } + current_descriptor->buf_ptr = p_pkt_info->buf_ptr; + current_descriptor->byte_cnt = p_pkt_info->byte_cnt; + mp->tx_skb[tx_desc_curr] = (struct sk_buff *) p_pkt_info->return_info; + + mb(); + + /* Set last desc with DMA ownership and interrupt enable. */ + current_descriptor->cmd_sts = command_status | + ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT; + + /* Apply send command */ + ETH_ENABLE_TX_QUEUE(mp->port_num); + + /* Finish Tx packet. Update first desc in case of Tx resource error */ + tx_desc_curr = (tx_desc_curr + 1) % MV64340_TX_QUEUE_SIZE; + + /* Update the current descriptor */ + mp->tx_curr_desc_q = tx_desc_curr; + + /* Check for ring index overlap in the Tx desc ring */ + if (tx_desc_curr == tx_desc_used) { + mp->tx_resource_err = 1; + return ETH_QUEUE_LAST_RESOURCE; + } + + return ETH_OK; +} +#endif + +/* + * eth_tx_return_desc - Free all used Tx descriptors + * + * DESCRIPTION: + * This routine returns the transmitted packet information to the caller. + * It uses the 'first' index to support Tx desc return in case a transmit + * of a packet spanned over multiple buffer still in process. + * In case the Tx queue was in "resource error" condition, where there are + * no available Tx resources, the function resets the resource error flag. + * + * INPUT: + * struct mv64340_private *mp Ethernet Port Control srtuct. + * struct pkt_info *p_pkt_info User packet buffer. + * + * OUTPUT: + * Tx ring 'first' and 'used' indexes are updated. + * + * RETURN: + * ETH_ERROR in case the routine can not access Tx desc ring. + * ETH_RETRY in case there is transmission in process. + * ETH_END_OF_JOB if the routine has nothing to release. + * ETH_OK otherwise. 
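+ *
+ * A typical reclaim loop built on this routine, as in mv64340_tx
+ * above (sketch only; unmapping of the buffer is omitted):
+ *
+ *	struct pkt_info pkt_info;
+ *
+ *	while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK)
+ *		if (pkt_info.return_info)
+ *			dev_kfree_skb_irq(pkt_info.return_info);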
+ * + */ +static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv64340_private * mp, + struct pkt_info * p_pkt_info) +{ + int tx_desc_used, tx_desc_curr; +#ifdef MV64340_CHECKSUM_OFFLOAD_TX + int tx_first_desc; +#endif + volatile struct eth_tx_desc *p_tx_desc_used; + unsigned int command_status; + + /* Get the Tx Desc ring indexes */ + tx_desc_curr = mp->tx_curr_desc_q; + tx_desc_used = mp->tx_used_desc_q; +#ifdef MV64340_CHECKSUM_OFFLOAD_TX + tx_first_desc = mp->tx_first_desc_q; +#endif + p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used]; + + /* XXX Sanity check */ + if (p_tx_desc_used == NULL) + return ETH_ERROR; + + command_status = p_tx_desc_used->cmd_sts; + + /* Still transmitting... */ +#ifndef MV64340_CHECKSUM_OFFLOAD_TX + if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) + return ETH_RETRY; +#endif + /* Stop release. About to overlap the current available Tx descriptor */ +#ifdef MV64340_CHECKSUM_OFFLOAD_TX + if (tx_desc_used == tx_first_desc && !mp->tx_resource_err) + return ETH_END_OF_JOB; +#else + if (tx_desc_used == tx_desc_curr && !mp->tx_resource_err) + return ETH_END_OF_JOB; +#endif + + /* Pass the packet information to the caller */ + p_pkt_info->cmd_sts = command_status; + p_pkt_info->return_info = mp->tx_skb[tx_desc_used]; + mp->tx_skb[tx_desc_used] = NULL; + + /* Update the next descriptor to release. */ + mp->tx_used_desc_q = (tx_desc_used + 1) % MV64340_TX_QUEUE_SIZE; + + /* Any Tx return cancels the Tx resource error status */ + mp->tx_resource_err = 0; + + return ETH_OK; +} + +/* + * eth_port_receive - Get received information from Rx ring. + * + * DESCRIPTION: + * This routine returns the received data to the caller. There is no + * data copying during routine operation. All information is returned + * using pointer to packet information struct passed from the caller. + * If the routine exhausts Rx ring resources then the resource error flag + * is set. + * + * INPUT: + * struct mv64340_private *mp Ethernet Port Control srtuct. + * struct pkt_info *p_pkt_info User packet buffer. + * + * OUTPUT: + * Rx ring current and used indexes are updated. + * + * RETURN: + * ETH_ERROR in case the routine can not access Rx desc ring. + * ETH_QUEUE_FULL if Rx ring resources are exhausted. + * ETH_END_OF_JOB if there is no received data. + * ETH_OK otherwise. + */ +static ETH_FUNC_RET_STATUS eth_port_receive(struct mv64340_private * mp, + struct pkt_info * p_pkt_info) +{ + int rx_next_curr_desc, rx_curr_desc, rx_used_desc; + volatile struct eth_rx_desc * p_rx_desc; + unsigned int command_status; + + /* Do not process Rx ring in case of Rx ring resource error */ + if (mp->rx_resource_err) + return ETH_QUEUE_FULL; + + /* Get the Rx Desc ring 'curr and 'used' indexes */ + rx_curr_desc = mp->rx_curr_desc_q; + rx_used_desc = mp->rx_used_desc_q; + + p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc]; + + /* The following parameters are used to save readings from memory */ + command_status = p_rx_desc->cmd_sts; + + /* Nothing to receive... 
*/ + if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) + return ETH_END_OF_JOB; + + p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET; + p_pkt_info->cmd_sts = command_status; + p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET; + p_pkt_info->return_info = mp->rx_skb[rx_curr_desc]; + p_pkt_info->l4i_chk = p_rx_desc->buf_size; + + /* Clean the return info field to indicate that the packet has been */ + /* moved to the upper layers */ + mp->rx_skb[rx_curr_desc] = NULL; + + /* Update current index in data structure */ + rx_next_curr_desc = (rx_curr_desc + 1) % MV64340_RX_QUEUE_SIZE; + mp->rx_curr_desc_q = rx_next_curr_desc; + + /* Rx descriptors exhausted. Set the Rx ring resource error flag */ + if (rx_next_curr_desc == rx_used_desc) + mp->rx_resource_err = 1; + + mb(); + return ETH_OK; +} + +/* + * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring. + * + * DESCRIPTION: + * This routine returns a Rx buffer back to the Rx ring. It retrieves the + * next 'used' descriptor and attached the returned buffer to it. + * In case the Rx ring was in "resource error" condition, where there are + * no available Rx resources, the function resets the resource error flag. + * + * INPUT: + * struct mv64340_private *mp Ethernet Port Control srtuct. + * struct pkt_info *p_pkt_info Information on the returned buffer. + * + * OUTPUT: + * New available Rx resource in Rx descriptor ring. + * + * RETURN: + * ETH_ERROR in case the routine can not access Rx desc ring. + * ETH_OK otherwise. + */ +static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv64340_private * mp, + struct pkt_info * p_pkt_info) +{ + int used_rx_desc; /* Where to return Rx resource */ + volatile struct eth_rx_desc* p_used_rx_desc; + + /* Get 'used' Rx descriptor */ + used_rx_desc = mp->rx_used_desc_q; + p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc]; + + p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr; + p_used_rx_desc->buf_size = p_pkt_info->byte_cnt; + mp->rx_skb[used_rx_desc] = p_pkt_info->return_info; + + /* Flush the write pipe */ + mb(); + + /* Return the descriptor to DMA ownership */ + p_used_rx_desc->cmd_sts = + ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT; + + /* Flush descriptor and CPU pipe */ + mb(); + + /* Move the used descriptor pointer to the next descriptor */ + mp->rx_used_desc_q = (used_rx_desc + 1) % MV64340_RX_QUEUE_SIZE; + + /* Any Rx return cancels the Rx resource error status */ + mp->rx_resource_err = 0; + + return ETH_OK; +} + +#ifdef MDD_CUT +/* + * eth_b_copy - Copy bytes from source to destination + * + * DESCRIPTION: + * This function supports the eight bytes limitation on Tx buffer size. + * The routine will zero eight bytes starting from the destination address + * followed by copying bytes from the source address to the destination. + * + * INPUT: + * unsigned int src_addr 32 bit source address. + * unsigned int dst_addr 32 bit destination address. + * int byte_count Number of bytes to copy. + * + * OUTPUT: + * See description. + * + * RETURN: + * None. 
+ *
+ */
+static void eth_b_copy(unsigned int src_addr, unsigned int dst_addr,
+		       int byte_count)
+{
+	/* Zero the dst_addr area */
+	*(unsigned int *) dst_addr = 0x0;
+
+	while (byte_count != 0) {
+		*(char *) dst_addr = *(char *) src_addr;
+		dst_addr++;
+		src_addr++;
+		byte_count--;
+	}
+}
+#endif
diff -Nru a/drivers/net/mv64340_eth.h b/drivers/net/mv64340_eth.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/drivers/net/mv64340_eth.h	2004-07-13 12:46:19 -07:00
@@ -0,0 +1,601 @@
+#ifndef __MV64340_ETH_H__
+#define __MV64340_ETH_H__
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#define BIT0	0x00000001
+#define BIT1	0x00000002
+#define BIT2	0x00000004
+#define BIT3	0x00000008
+#define BIT4	0x00000010
+#define BIT5	0x00000020
+#define BIT6	0x00000040
+#define BIT7	0x00000080
+#define BIT8	0x00000100
+#define BIT9	0x00000200
+#define BIT10	0x00000400
+#define BIT11	0x00000800
+#define BIT12	0x00001000
+#define BIT13	0x00002000
+#define BIT14	0x00004000
+#define BIT15	0x00008000
+#define BIT16	0x00010000
+#define BIT17	0x00020000
+#define BIT18	0x00040000
+#define BIT19	0x00080000
+#define BIT20	0x00100000
+#define BIT21	0x00200000
+#define BIT22	0x00400000
+#define BIT23	0x00800000
+#define BIT24	0x01000000
+#define BIT25	0x02000000
+#define BIT26	0x04000000
+#define BIT27	0x08000000
+#define BIT28	0x10000000
+#define BIT29	0x20000000
+#define BIT30	0x40000000
+#define BIT31	0x80000000
+
+/*
+ * The first part is the high level driver of the gigE ethernet ports.
+ */
+
+#define ETH_PORT0_IRQ_NUM 48			/* main high register, bit0 */
+#define ETH_PORT1_IRQ_NUM ETH_PORT0_IRQ_NUM+1	/* main high register, bit1 */
+#define ETH_PORT2_IRQ_NUM ETH_PORT0_IRQ_NUM+2	/* main high register, bit2 */
+
+/* Checksum offload for Tx works */
+#define MV64340_CHECKSUM_OFFLOAD_TX
+#define MV64340_NAPI
+#define MV64340_TX_FAST_REFILL
+#undef MV64340_COAL
+
+/*
+ * Number of RX / TX descriptors on RX / TX rings.
+ * Note that allocating RX descriptors is done by allocating the RX
+ * ring AND preallocated RX buffers (skb's) for each descriptor.
+ * Allocating TX descriptors only allocates the TX descriptors ring,
+ * with no preallocated TX buffers (skb's are allocated by the higher
+ * layers).
+ */
+
+/* Default TX ring size is 1000 descriptors */
+#define MV64340_TX_QUEUE_SIZE 1000
+
+/* Default RX ring size is 400 descriptors */
+#define MV64340_RX_QUEUE_SIZE 400
+
+#define MV64340_TX_COAL 100
+#ifdef MV64340_COAL
+#define MV64340_RX_COAL 100
+#endif
+
+
+/*
+ * The second part is the low level driver of the gigE ethernet ports.
+ */
+
+
+/*
+ * Header File for : MV-643xx network interface header
+ *
+ * DESCRIPTION:
+ * This header file contains macros, typedefs and function declarations
+ * for the Marvell Gigabit Ethernet Controller.
+ *
+ * DEPENDENCIES:
+ * None.
+ *
+ */
+
+/* Default port configuration value */
+#define PORT_CONFIG_VALUE \
+	ETH_UNICAST_NORMAL_MODE | \
+	ETH_DEFAULT_RX_QUEUE_0 | \
+	ETH_DEFAULT_RX_ARP_QUEUE_0 | \
+	ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP | \
+	ETH_RECEIVE_BC_IF_IP | \
+	ETH_RECEIVE_BC_IF_ARP | \
+	ETH_CAPTURE_TCP_FRAMES_DIS | \
+	ETH_CAPTURE_UDP_FRAMES_DIS | \
+	ETH_DEFAULT_RX_TCP_QUEUE_0 | \
+	ETH_DEFAULT_RX_UDP_QUEUE_0 | \
+	ETH_DEFAULT_RX_BPDU_QUEUE_0
+
+/* Default port extend configuration value */
+#define PORT_CONFIG_EXTEND_VALUE \
+	ETH_SPAN_BPDU_PACKETS_AS_NORMAL | \
+	ETH_PARTITION_DISABLE
+
+
+/* Default sdma control value.  Note: no trailing semicolon, since this
+ * macro is OR-combined with byte-swap flags at its use sites. */
+#define PORT_SDMA_CONFIG_VALUE \
+	ETH_RX_BURST_SIZE_16_64BIT | \
+	GT_ETH_IPG_INT_RX(0) | \
+	ETH_TX_BURST_SIZE_16_64BIT
+
+#define GT_ETH_IPG_INT_RX(value) \
+	((value & 0x3fff) << 8)
+
+/* Default port serial control value */
+#define PORT_SERIAL_CONTROL_VALUE \
+	ETH_FORCE_LINK_PASS | \
+	ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \
+	ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL | \
+	ETH_ADV_SYMMETRIC_FLOW_CTRL | \
+	ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX | \
+	ETH_FORCE_BP_MODE_NO_JAM | \
+	BIT9 | \
+	ETH_DO_NOT_FORCE_LINK_FAIL | \
+	ETH_RETRANSMIT_16_ATTEMPTS | \
+	ETH_ENABLE_AUTO_NEG_SPEED_GMII | \
+	ETH_DTE_ADV_0 | \
+	ETH_DISABLE_AUTO_NEG_BYPASS | \
+	ETH_AUTO_NEG_NO_CHANGE | \
+	ETH_MAX_RX_PACKET_9700BYTE | \
+	ETH_CLR_EXT_LOOPBACK | \
+	ETH_SET_FULL_DUPLEX_MODE | \
+	ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX
+
+#define RX_BUFFER_MAX_SIZE	0x4000000
+#define TX_BUFFER_MAX_SIZE	0x4000000
+
+/* MAC accept/reject macros */
+#define ACCEPT_MAC_ADDR	0
+#define REJECT_MAC_ADDR	1
+
+/* Buffer offset from buffer pointer */
+#define RX_BUF_OFFSET	0x2
+
+/* Gigabit Ethernet Unit Global Registers */
+
+/* MIB Counters register definitions */
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW	0x0
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH	0x4
+#define ETH_MIB_BAD_OCTETS_RECEIVED		0x8
+#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR	0xc
+#define ETH_MIB_GOOD_FRAMES_RECEIVED		0x10
+#define ETH_MIB_BAD_FRAMES_RECEIVED		0x14
+#define ETH_MIB_BROADCAST_FRAMES_RECEIVED	0x18
+#define ETH_MIB_MULTICAST_FRAMES_RECEIVED	0x1c
+#define ETH_MIB_FRAMES_64_OCTETS		0x20
+#define ETH_MIB_FRAMES_65_TO_127_OCTETS		0x24
+#define ETH_MIB_FRAMES_128_TO_255_OCTETS	0x28
+#define ETH_MIB_FRAMES_256_TO_511_OCTETS	0x2c
+#define ETH_MIB_FRAMES_512_TO_1023_OCTETS	0x30
+#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS	0x34
+#define ETH_MIB_GOOD_OCTETS_SENT_LOW		0x38
+#define ETH_MIB_GOOD_OCTETS_SENT_HIGH		0x3c
+#define ETH_MIB_GOOD_FRAMES_SENT		0x40
+#define ETH_MIB_EXCESSIVE_COLLISION		0x44
+#define ETH_MIB_MULTICAST_FRAMES_SENT		0x48
+#define ETH_MIB_BROADCAST_FRAMES_SENT		0x4c
+#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED	0x50
+#define ETH_MIB_FC_SENT				0x54
+#define ETH_MIB_GOOD_FC_RECEIVED		0x58
+#define ETH_MIB_BAD_FC_RECEIVED			0x5c
+#define ETH_MIB_UNDERSIZE_RECEIVED		0x60
+#define ETH_MIB_FRAGMENTS_RECEIVED		0x64
+#define ETH_MIB_OVERSIZE_RECEIVED		0x68
+#define ETH_MIB_JABBER_RECEIVED			0x6c
+#define ETH_MIB_MAC_RECEIVE_ERROR		0x70
+#define ETH_MIB_BAD_CRC_EVENT			0x74
+#define ETH_MIB_COLLISION			0x78
+#define ETH_MIB_LATE_COLLISION			0x7c
+
+/* Port serial status reg (PSR) */
+#define ETH_INTERFACE_GMII_MII			0
+#define ETH_INTERFACE_PCM			BIT0
+#define ETH_LINK_IS_DOWN			0
+#define ETH_LINK_IS_UP				BIT1
+#define ETH_PORT_AT_HALF_DUPLEX			0
+#define ETH_PORT_AT_FULL_DUPLEX			BIT2
+#define ETH_RX_FLOW_CTRL_DISABLED		0
+#define ETH_RX_FLOW_CTRL_ENABLED		BIT3
+#define ETH_GMII_SPEED_100_10			0
+#define ETH_GMII_SPEED_1000			BIT4
+#define ETH_MII_SPEED_10			0
+#define ETH_MII_SPEED_100			BIT5
+#define ETH_NO_TX				0
+#define ETH_TX_IN_PROGRESS			BIT7
+#define ETH_BYPASS_NO_ACTIVE			0
+#define ETH_BYPASS_ACTIVE			BIT8
+#define ETH_PORT_NOT_AT_PARTITION_STATE		0
+#define ETH_PORT_AT_PARTITION_STATE		BIT9
+#define ETH_PORT_TX_FIFO_NOT_EMPTY		0
+#define ETH_PORT_TX_FIFO_EMPTY			BIT10
+
+
+/* These macros describe the Port configuration reg (Px_cR) bits */
+#define ETH_UNICAST_NORMAL_MODE			0
+#define ETH_UNICAST_PROMISCUOUS_MODE		BIT0
+#define ETH_DEFAULT_RX_QUEUE_0			0
+#define ETH_DEFAULT_RX_QUEUE_1			BIT1
+#define ETH_DEFAULT_RX_QUEUE_2			BIT2
+#define ETH_DEFAULT_RX_QUEUE_3			(BIT2 | BIT1)
+#define ETH_DEFAULT_RX_QUEUE_4			BIT3
+#define ETH_DEFAULT_RX_QUEUE_5			(BIT3 | BIT1)
+#define ETH_DEFAULT_RX_QUEUE_6			(BIT3 | BIT2)
+#define ETH_DEFAULT_RX_QUEUE_7			(BIT3 | BIT2 | BIT1)
+#define ETH_DEFAULT_RX_ARP_QUEUE_0		0
+#define ETH_DEFAULT_RX_ARP_QUEUE_1		BIT4
+#define ETH_DEFAULT_RX_ARP_QUEUE_2		BIT5
+#define ETH_DEFAULT_RX_ARP_QUEUE_3		(BIT5 | BIT4)
+#define ETH_DEFAULT_RX_ARP_QUEUE_4		BIT6
+#define ETH_DEFAULT_RX_ARP_QUEUE_5		(BIT6 | BIT4)
+#define ETH_DEFAULT_RX_ARP_QUEUE_6		(BIT6 | BIT5)
+#define ETH_DEFAULT_RX_ARP_QUEUE_7		(BIT6 | BIT5 | BIT4)
+#define ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP		0
+#define ETH_REJECT_BC_IF_NOT_IP_OR_ARP		BIT7
+#define ETH_RECEIVE_BC_IF_IP			0
+#define ETH_REJECT_BC_IF_IP			BIT8
+#define ETH_RECEIVE_BC_IF_ARP			0
+#define ETH_REJECT_BC_IF_ARP			BIT9
+#define ETH_TX_AM_NO_UPDATE_ERROR_SUMMARY	BIT12
+#define ETH_CAPTURE_TCP_FRAMES_DIS		0
+#define ETH_CAPTURE_TCP_FRAMES_EN		BIT14
+#define ETH_CAPTURE_UDP_FRAMES_DIS		0
+#define ETH_CAPTURE_UDP_FRAMES_EN		BIT15
+#define ETH_DEFAULT_RX_TCP_QUEUE_0		0
+#define ETH_DEFAULT_RX_TCP_QUEUE_1		BIT16
+#define ETH_DEFAULT_RX_TCP_QUEUE_2		BIT17
+#define ETH_DEFAULT_RX_TCP_QUEUE_3		(BIT17 | BIT16)
+#define ETH_DEFAULT_RX_TCP_QUEUE_4		BIT18
+#define ETH_DEFAULT_RX_TCP_QUEUE_5		(BIT18 | BIT16)
+#define ETH_DEFAULT_RX_TCP_QUEUE_6		(BIT18 | BIT17)
+#define ETH_DEFAULT_RX_TCP_QUEUE_7		(BIT18 | BIT17 | BIT16)
+#define ETH_DEFAULT_RX_UDP_QUEUE_0		0
+#define ETH_DEFAULT_RX_UDP_QUEUE_1		BIT19
+#define ETH_DEFAULT_RX_UDP_QUEUE_2		BIT20
+#define ETH_DEFAULT_RX_UDP_QUEUE_3		(BIT20 | BIT19)
+#define ETH_DEFAULT_RX_UDP_QUEUE_4		BIT21
+#define ETH_DEFAULT_RX_UDP_QUEUE_5		(BIT21 | BIT19)
+#define ETH_DEFAULT_RX_UDP_QUEUE_6		(BIT21 | BIT20)
+#define ETH_DEFAULT_RX_UDP_QUEUE_7		(BIT21 | BIT20 | BIT19)
+#define ETH_DEFAULT_RX_BPDU_QUEUE_0		0
+#define ETH_DEFAULT_RX_BPDU_QUEUE_1		BIT22
+#define ETH_DEFAULT_RX_BPDU_QUEUE_2		BIT23
+#define ETH_DEFAULT_RX_BPDU_QUEUE_3		(BIT23 | BIT22)
+#define ETH_DEFAULT_RX_BPDU_QUEUE_4		BIT24
+#define ETH_DEFAULT_RX_BPDU_QUEUE_5		(BIT24 | BIT22)
+#define ETH_DEFAULT_RX_BPDU_QUEUE_6		(BIT24 | BIT23)
+#define ETH_DEFAULT_RX_BPDU_QUEUE_7		(BIT24 | BIT23 | BIT22)
+
+
+/* These macros describe the Port configuration extend reg (Px_cXR) bits */
+#define ETH_CLASSIFY_EN				BIT0
+#define ETH_SPAN_BPDU_PACKETS_AS_NORMAL		0
+#define ETH_SPAN_BPDU_PACKETS_TO_RX_QUEUE_7	BIT1
+#define ETH_PARTITION_DISABLE			0
+#define ETH_PARTITION_ENABLE			BIT2
+
+
+/* Tx/Rx queue command reg (RQCR/TQCR) */
+#define ETH_QUEUE_0_ENABLE			BIT0
+#define ETH_QUEUE_1_ENABLE			BIT1
+#define ETH_QUEUE_2_ENABLE			BIT2
+#define ETH_QUEUE_3_ENABLE			BIT3
+#define ETH_QUEUE_4_ENABLE			BIT4
+#define ETH_QUEUE_5_ENABLE			BIT5
+#define ETH_QUEUE_6_ENABLE			BIT6
+#define ETH_QUEUE_7_ENABLE			BIT7
+#define ETH_QUEUE_0_DISABLE			BIT8
+#define ETH_QUEUE_1_DISABLE			BIT9
+#define ETH_QUEUE_2_DISABLE			BIT10
+#define ETH_QUEUE_3_DISABLE			BIT11
+#define ETH_QUEUE_4_DISABLE			BIT12
+#define ETH_QUEUE_5_DISABLE			BIT13
+#define ETH_QUEUE_6_DISABLE			BIT14
+#define
ETH_QUEUE_7_DISABLE BIT15 + + +/* These macros describes the Port Sdma configuration reg (SDCR) bits */ +#define ETH_RIFB BIT0 +#define ETH_RX_BURST_SIZE_1_64BIT 0 +#define ETH_RX_BURST_SIZE_2_64BIT BIT1 +#define ETH_RX_BURST_SIZE_4_64BIT BIT2 +#define ETH_RX_BURST_SIZE_8_64BIT (BIT2 | BIT1) +#define ETH_RX_BURST_SIZE_16_64BIT BIT3 +#define ETH_BLM_RX_NO_SWAP BIT4 +#define ETH_BLM_RX_BYTE_SWAP 0 +#define ETH_BLM_TX_NO_SWAP BIT5 +#define ETH_BLM_TX_BYTE_SWAP 0 +#define ETH_DESCRIPTORS_BYTE_SWAP BIT6 +#define ETH_DESCRIPTORS_NO_SWAP 0 +#define ETH_TX_BURST_SIZE_1_64BIT 0 +#define ETH_TX_BURST_SIZE_2_64BIT BIT22 +#define ETH_TX_BURST_SIZE_4_64BIT BIT23 +#define ETH_TX_BURST_SIZE_8_64BIT (BIT23 | BIT22) +#define ETH_TX_BURST_SIZE_16_64BIT BIT24 + + + +/* These macros describes the Port serial control reg (PSCR) bits */ +#define ETH_SERIAL_PORT_DISABLE 0 +#define ETH_SERIAL_PORT_ENABLE BIT0 +#define ETH_FORCE_LINK_PASS BIT1 +#define ETH_DO_NOT_FORCE_LINK_PASS 0 +#define ETH_ENABLE_AUTO_NEG_FOR_DUPLX 0 +#define ETH_DISABLE_AUTO_NEG_FOR_DUPLX BIT2 +#define ETH_ENABLE_AUTO_NEG_FOR_FLOW_CTRL 0 +#define ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL BIT3 +#define ETH_ADV_NO_FLOW_CTRL 0 +#define ETH_ADV_SYMMETRIC_FLOW_CTRL BIT4 +#define ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX 0 +#define ETH_FORCE_FC_MODE_TX_PAUSE_DIS BIT5 +#define ETH_FORCE_BP_MODE_NO_JAM 0 +#define ETH_FORCE_BP_MODE_JAM_TX BIT7 +#define ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR BIT8 +#define ETH_FORCE_LINK_FAIL 0 +#define ETH_DO_NOT_FORCE_LINK_FAIL BIT10 +#define ETH_RETRANSMIT_16_ATTEMPTS 0 +#define ETH_RETRANSMIT_FOREVER BIT11 +#define ETH_DISABLE_AUTO_NEG_SPEED_GMII BIT13 +#define ETH_ENABLE_AUTO_NEG_SPEED_GMII 0 +#define ETH_DTE_ADV_0 0 +#define ETH_DTE_ADV_1 BIT14 +#define ETH_DISABLE_AUTO_NEG_BYPASS 0 +#define ETH_ENABLE_AUTO_NEG_BYPASS BIT15 +#define ETH_AUTO_NEG_NO_CHANGE 0 +#define ETH_RESTART_AUTO_NEG BIT16 +#define ETH_MAX_RX_PACKET_1518BYTE 0 +#define ETH_MAX_RX_PACKET_1522BYTE BIT17 +#define ETH_MAX_RX_PACKET_1552BYTE BIT18 +#define ETH_MAX_RX_PACKET_9022BYTE (BIT18 | BIT17) +#define ETH_MAX_RX_PACKET_9192BYTE BIT19 +#define ETH_MAX_RX_PACKET_9700BYTE (BIT19 | BIT17) +#define ETH_SET_EXT_LOOPBACK BIT20 +#define ETH_CLR_EXT_LOOPBACK 0 +#define ETH_SET_FULL_DUPLEX_MODE BIT21 +#define ETH_SET_HALF_DUPLEX_MODE 0 +#define ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX BIT22 +#define ETH_DISABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX 0 +#define ETH_SET_GMII_SPEED_TO_10_100 0 +#define ETH_SET_GMII_SPEED_TO_1000 BIT23 +#define ETH_SET_MII_SPEED_TO_10 0 +#define ETH_SET_MII_SPEED_TO_100 BIT24 + + +/* SMI reg */ +#define ETH_SMI_BUSY BIT28 /* 0 - Write, 1 - Read */ +#define ETH_SMI_READ_VALID BIT27 /* 0 - Write, 1 - Read */ +#define ETH_SMI_OPCODE_WRITE 0 /* Completion of Read operation */ +#define ETH_SMI_OPCODE_READ BIT26 /* Operation is in progress */ + +/* SDMA command status fields macros */ + +/* Tx & Rx descriptors status */ +#define ETH_ERROR_SUMMARY (BIT0) + +/* Tx & Rx descriptors command */ +#define ETH_BUFFER_OWNED_BY_DMA (BIT31) + +/* Tx descriptors status */ +#define ETH_LC_ERROR (0 ) +#define ETH_UR_ERROR (BIT1 ) +#define ETH_RL_ERROR (BIT2 ) +#define ETH_LLC_SNAP_FORMAT (BIT9 ) + +/* Rx descriptors status */ +#define ETH_CRC_ERROR (0 ) +#define ETH_OVERRUN_ERROR (BIT1 ) +#define ETH_MAX_FRAME_LENGTH_ERROR (BIT2 ) +#define ETH_RESOURCE_ERROR ((BIT2 | BIT1)) +#define ETH_VLAN_TAGGED (BIT19) +#define ETH_BPDU_FRAME (BIT20) +#define ETH_TCP_FRAME_OVER_IP_V_4 (0 ) +#define ETH_UDP_FRAME_OVER_IP_V_4 (BIT21) +#define ETH_OTHER_FRAME_TYPE (BIT22) +#define 
ETH_LAYER_2_IS_ETH_V_2 (BIT23) +#define ETH_FRAME_TYPE_IP_V_4 (BIT24) +#define ETH_FRAME_HEADER_OK (BIT25) +#define ETH_RX_LAST_DESC (BIT26) +#define ETH_RX_FIRST_DESC (BIT27) +#define ETH_UNKNOWN_DESTINATION_ADDR (BIT28) +#define ETH_RX_ENABLE_INTERRUPT (BIT29) +#define ETH_LAYER_4_CHECKSUM_OK (BIT30) + +/* Rx descriptors byte count */ +#define ETH_FRAME_FRAGMENTED (BIT2) + +/* Tx descriptors command */ +#define ETH_LAYER_4_CHECKSUM_FIRST_DESC (BIT10) +#define ETH_FRAME_SET_TO_VLAN (BIT15) +#define ETH_TCP_FRAME (0 ) +#define ETH_UDP_FRAME (BIT16) +#define ETH_GEN_TCP_UDP_CHECKSUM (BIT17) +#define ETH_GEN_IP_V_4_CHECKSUM (BIT18) +#define ETH_ZERO_PADDING (BIT19) +#define ETH_TX_LAST_DESC (BIT20) +#define ETH_TX_FIRST_DESC (BIT21) +#define ETH_GEN_CRC (BIT22) +#define ETH_TX_ENABLE_INTERRUPT (BIT23) +#define ETH_AUTO_MODE (BIT30) + +/* typedefs */ + +typedef enum _eth_func_ret_status { + ETH_OK, /* Returned as expected. */ + ETH_ERROR, /* Fundamental error. */ + ETH_RETRY, /* Could not process request. Try later. */ + ETH_END_OF_JOB, /* Ring has nothing to process. */ + ETH_QUEUE_FULL, /* Ring resource error. */ + ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */ +} ETH_FUNC_RET_STATUS; + +typedef enum _eth_target { + ETH_TARGET_DRAM, + ETH_TARGET_DEVICE, + ETH_TARGET_CBS, + ETH_TARGET_PCI0, + ETH_TARGET_PCI1 +} ETH_TARGET; + +/* These are for big-endian machines. Little endian needs different + * definitions. + */ +#if defined(__BIG_ENDIAN) +struct eth_rx_desc { + u16 byte_cnt; /* Descriptor buffer byte count */ + u16 buf_size; /* Buffer size */ + u32 cmd_sts; /* Descriptor command status */ + u32 next_desc_ptr; /* Next descriptor pointer */ + u32 buf_ptr; /* Descriptor buffer pointer */ +}; + +struct eth_tx_desc { + u16 byte_cnt; /* buffer byte count */ + u16 l4i_chk; /* CPU provided TCP checksum */ + u32 cmd_sts; /* Command/status field */ + u32 next_desc_ptr; /* Pointer to next descriptor */ + u32 buf_ptr; /* pointer to buffer for this descriptor */ +}; + +#elif defined(__LITTLE_ENDIAN) +struct eth_rx_desc { + u32 cmd_sts; /* Descriptor command status */ + u16 buf_size; /* Buffer size */ + u16 byte_cnt; /* Descriptor buffer byte count */ + u32 buf_ptr; /* Descriptor buffer pointer */ + u32 next_desc_ptr; /* Next descriptor pointer */ +}; + +struct eth_tx_desc { + u32 cmd_sts; /* Command/status field */ + u16 l4i_chk; /* CPU provided TCP checksum */ + u16 byte_cnt; /* buffer byte count */ + u32 buf_ptr; /* pointer to buffer for this descriptor */ + u32 next_desc_ptr; /* Pointer to next descriptor */ +}; +#else +#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined +#endif + +/* Unified struct for Rx and Tx operations. The user is not required to */ +/* be familier with neither Tx nor Rx descriptors. */ +struct pkt_info { + unsigned short byte_cnt; /* Descriptor buffer byte count */ + unsigned short l4i_chk; /* Tx CPU provided TCP Checksum */ + unsigned int cmd_sts; /* Descriptor command status */ + dma_addr_t buf_ptr; /* Descriptor buffer pointer */ + struct sk_buff * return_info; /* User resource return information */ +}; + + +/* Ethernet port specific infomation */ + +struct mv64340_private { + int port_num; /* User Ethernet port number */ + u8 port_mac_addr[6]; /* User defined port MAC address. 
*/ + u32 port_config; /* User port configuration value */ + u32 port_config_extend; /* User port config extend value */ + u32 port_sdma_config; /* User port SDMA config value */ + u32 port_serial_control; /* User port serial control value */ + u32 port_tx_queue_command; /* Port active Tx queues summary */ + u32 port_rx_queue_command; /* Port active Rx queues summary */ + + int rx_resource_err; /* Rx ring resource error flag */ + int tx_resource_err; /* Tx ring resource error flag */ + + /* Tx/Rx ring management index fields. For driver use */ + + /* Next available and first returning Rx resource */ + int rx_curr_desc_q, rx_used_desc_q; + + /* Next available and first returning Tx resource */ + int tx_curr_desc_q, tx_used_desc_q; +#ifdef MV64340_CHECKSUM_OFFLOAD_TX + int tx_first_desc_q; +#endif + +#ifdef MV64340_TX_FAST_REFILL + u32 tx_clean_threshold; +#endif + + volatile struct eth_rx_desc * p_rx_desc_area; + dma_addr_t rx_desc_dma; + unsigned int rx_desc_area_size; + struct sk_buff * rx_skb[MV64340_RX_QUEUE_SIZE]; + + volatile struct eth_tx_desc * p_tx_desc_area; + dma_addr_t tx_desc_dma; + unsigned int tx_desc_area_size; + struct sk_buff * tx_skb[MV64340_TX_QUEUE_SIZE]; + + struct work_struct tx_timeout_task; + + /* + * Former struct mv64340_eth_priv members start here + */ + struct net_device_stats stats; + spinlock_t lock; + /* Size of Tx Ring per queue */ + unsigned int tx_ring_size; + /* Amount of SKBs outstanding on Tx queue */ + unsigned int tx_ring_skbs; + /* Size of Rx Ring per queue */ + unsigned int rx_ring_size; + /* Amount of SKBs allocated to Rx Ring per queue */ + unsigned int rx_ring_skbs; + + /* + * rx_task used to fill RX ring out of bottom half context + */ + struct work_struct rx_task; + + /* + * Used in case RX Ring is empty, which can be caused when + * the system does not have resources (skb's) + */ + struct timer_list timeout; + long rx_task_busy __attribute__ ((aligned(SMP_CACHE_BYTES))); + unsigned rx_timer_flag; + + u32 rx_int_coal; + u32 tx_int_coal; +}; + +/* ethernet.h API list */ + +/* Port operation control routines */ +static void eth_port_init(struct mv64340_private *mp); +static void eth_port_reset(unsigned int eth_port_num); +static int eth_port_start(struct mv64340_private *mp); + +static void ethernet_set_config_reg(unsigned int eth_port_num, + unsigned int value); +static unsigned int ethernet_get_config_reg(unsigned int eth_port_num); + +/* Port MAC address routines */ +static void eth_port_uc_addr_set(unsigned int eth_port_num, + unsigned char *p_addr); + +/* PHY and MIB routines */ +static int ethernet_phy_reset(unsigned int eth_port_num); + +static int eth_port_write_smi_reg(unsigned int eth_port_num, + unsigned int phy_reg, + unsigned int value); + +static int eth_port_read_smi_reg(unsigned int eth_port_num, + unsigned int phy_reg, + unsigned int *value); + +static void eth_clear_mib_counters(unsigned int eth_port_num); + +/* Port data flow control routines */ +static ETH_FUNC_RET_STATUS eth_port_send(struct mv64340_private *mp, + struct pkt_info * p_pkt_info); +static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv64340_private *mp, + struct pkt_info * p_pkt_info); +static ETH_FUNC_RET_STATUS eth_port_receive(struct mv64340_private *mp, + struct pkt_info * p_pkt_info); +static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv64340_private *mp, + struct pkt_info * p_pkt_info); + +#endif /* __MV64340_ETH_H__ */ diff -Nru a/drivers/net/natsemi.c b/drivers/net/natsemi.c --- a/drivers/net/natsemi.c 2004-07-13 12:46:19 -07:00 +++ 
b/drivers/net/natsemi.c 2004-07-13 12:46:19 -07:00 @@ -230,7 +230,14 @@ #define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32)) #define NATSEMI_EEPROM_SIZE 24 /* 12 16-bit values */ -#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */ +/* Buffer sizes: + * The nic writes 32-bit values, even if the upper bytes of + * a 32-bit value are beyond the end of the buffer. + */ +#define NATSEMI_HEADERS 22 /* 2*mac,type,vlan,crc */ +#define NATSEMI_PADDING 16 /* 2 bytes should be sufficient */ +#define NATSEMI_LONGPKT 1518 /* limit for normal packets */ +#define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */ /* These identify the driver base version and may not be removed. */ static char version[] __devinitdata = @@ -354,6 +361,18 @@ #define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1) +/* + * Support for fibre connections on Am79C874: + * This phy needs a special setup when connected to a fibre cable. + * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf + */ +#define PHYID_AM79C874 0x0022561b + +#define MII_MCTRL 0x15 /* mode control register */ +#define MII_FX_SEL 0x0001 /* 100BASE-FX (fiber) */ +#define MII_EN_SCRM 0x0004 /* enable scrambler (tp) */ + + /* array of board data directly indexed by pci_tbl[x].driver_data */ static struct { const char *name; @@ -457,6 +476,9 @@ EE_DataIn = 0x01, EE_ChipSelect = 0x08, EE_DataOut = 0x02, + MII_Data = 0x10, + MII_Write = 0x20, + MII_ShiftClk = 0x40, }; enum PCIBusCfg_bits { @@ -518,6 +540,22 @@ TxCarrierIgn = 0x80000000 }; +/* + * Tx Configuration: + * - 256 byte DMA burst length + * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free) + * - 64 bytes initial drain threshold (i.e. begin actual transmission + * when 64 bytes are in the fifo) + * - on tx underruns, increase drain threshold by 64. + * - at most use a drain threshold of 1472 bytes: The sum of the fill + * threshold and the drain threshold must be less than 2016 bytes. 
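 + * As a rough worked example of the encoding used by the macros below: + * both thresholds are programmed in units of 32 bytes, so the 512 byte + * fill threshold becomes TX_FLTH_VAL = (512/32) << 8, the 64 byte initial + * drain threshold becomes TX_DRTH_VAL_START = 64/32 = 2, each underrun + * adds TX_DRTH_VAL_INC = 2 (i.e. 64 bytes), and the cap is + * TX_DRTH_VAL_LIMIT = 1472/32 = 46.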
+ * + */ +#define TX_FLTH_VAL ((512/32) << 8) +#define TX_DRTH_VAL_START (64/32) +#define TX_DRTH_VAL_INC 2 +#define TX_DRTH_VAL_LIMIT (1472/32) + enum RxConfig_bits { RxDrthMask = 0x3e, RxMxdmaMask = 0x700000, @@ -534,6 +572,7 @@ RxAcceptRunt = 0x40000000, RxAcceptErr = 0x80000000 }; +#define RX_DRTH_VAL (128/8) enum ClkRun_bits { PMEEnable = 0x100, @@ -588,9 +627,12 @@ }; enum PhyCtrl_bits { - PhyAddrMask = 0xf, + PhyAddrMask = 0x1f, }; +#define PHY_ADDR_NONE 32 +#define PHY_ADDR_INTERNAL 1 + /* values we might find in the silicon revision register */ #define SRR_DP83815_C 0x0302 #define SRR_DP83815_D 0x0403 @@ -650,7 +692,9 @@ int oom; /* Do not touch the nic registers */ int hands_off; - /* These values are keep track of the transceiver/media in use */ + /* external phy that is used: only valid if dev->if_port != PORT_TP */ + int mii; + int phy_addr_external; unsigned int full_duplex; /* Rx filter */ u32 cur_rx_mode; @@ -663,6 +707,10 @@ u32 srr; /* expected DSPCFG value */ u16 dspcfg; + /* parms saved in ethtool format */ + u16 speed; /* The forced speed, 10Mb, 100Mb, gigabit */ + u8 duplex; /* Duplex, half or full */ + u8 autoneg; /* Autonegotiation enabled */ /* MII transceiver section */ u16 advertising; unsigned int iosize; @@ -670,9 +718,14 @@ u32 msg_enable; }; +static void move_int_phy(struct net_device *dev, int addr); static int eeprom_read(long ioaddr, int location); -static int mdio_read(struct net_device *dev, int phy_id, int reg); -static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 data); +static int mdio_read(struct net_device *dev, int reg); +static void mdio_write(struct net_device *dev, int reg, u16 data); +static void init_phy_fixup(struct net_device *dev); +static int miiport_read(struct net_device *dev, int phy_id, int reg); +static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data); +static int find_mii(struct net_device *dev); static void natsemi_reset(struct net_device *dev); static void natsemi_reload_eeprom(struct net_device *dev); static void natsemi_stop_rxtx(struct net_device *dev); @@ -696,6 +749,7 @@ static void netdev_error(struct net_device *dev, int intr_status); static void netdev_rx(struct net_device *dev); static void netdev_tx_done(struct net_device *dev); +static int natsemi_change_mtu(struct net_device *dev, int new_mtu); static void __set_rx_mode(struct net_device *dev); static void set_rx_mode(struct net_device *dev); static void __get_stats(struct net_device *dev); @@ -712,6 +766,29 @@ static int netdev_get_regs(struct net_device *dev, u8 *buf); static int netdev_get_eeprom(struct net_device *dev, u8 *buf); +static void move_int_phy(struct net_device *dev, int addr) +{ + struct netdev_private *np = netdev_priv(dev); + int target = 31; + + /* + * The internal phy is visible on the external mii bus. Therefore we must + * move it away before we can send commands to an external phy. + * There are two addresses we must avoid: + * - the address of the external phy that is used for transmission. + * - the address that we want to access. User space can access phys + * on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independent from the + * phy that is used for transmission. 
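 + * To illustrate with made-up numbers: if the external phy in use sits at + * address 5 and user space asks for address 31, target drops from 31 to + * 30 and the internal phy is parked there, leaving both the requested + * address and the active external phy untouched.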
+ */ + + if (target == addr) + target--; + if (target == np->phy_addr_external) + target--; + writew(target, dev->base_addr + PhyCtrl); + readw(dev->base_addr + PhyCtrl); + udelay(1); +} static int __devinit natsemi_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) @@ -782,7 +859,7 @@ dev->base_addr = ioaddr; dev->irq = irq; - np = dev->priv; + np = netdev_priv(dev); np->pci_dev = pdev; pci_set_drvdata(pdev, dev); @@ -791,10 +868,32 @@ np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG; np->hands_off = 0; + /* Initial port: + * - If the nic was configured to use an external phy and if find_mii + * finds a phy: use external port, first phy that replies. + * - Otherwise: internal port. + * Note that the phy address for the internal phy doesn't matter: + * The address would be used to access a phy over the mii bus, but + * the internal phy is accessed through mapped registers. + */ + if (readl(dev->base_addr + ChipConfig) & CfgExtPhy) + dev->if_port = PORT_MII; + else + dev->if_port = PORT_TP; /* Reset the chip to erase previous misconfiguration. */ natsemi_reload_eeprom(dev); natsemi_reset(dev); + if (dev->if_port != PORT_TP) { + np->phy_addr_external = find_mii(dev); + if (np->phy_addr_external == PHY_ADDR_NONE) { + dev->if_port = PORT_TP; + np->phy_addr_external = PHY_ADDR_INTERNAL; + } + } else { + np->phy_addr_external = PHY_ADDR_INTERNAL; + } + option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; if (dev->mem_start) option = dev->mem_start; @@ -805,8 +904,8 @@ np->full_duplex = 1; if (option & 15) printk(KERN_INFO - "%s: ignoring user supplied media type %d", - dev->name, option & 15); + "natsemi %s: ignoring user supplied media type %d", + pci_name(np->pci_dev), option & 15); } if (find_cnt < MAX_UNITS && full_duplex[find_cnt]) np->full_duplex = 1; @@ -817,6 +916,7 @@ dev->stop = &netdev_close; dev->get_stats = &get_stats; dev->set_multicast_list = &set_rx_mode; + dev->change_mtu = &natsemi_change_mtu; dev->do_ioctl = &netdev_ioctl; dev->tx_timeout = &tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; @@ -824,45 +924,57 @@ if (mtu) dev->mtu = mtu; - i = register_netdev(dev); - if (i) - goto err_register_netdev; - netif_carrier_off(dev); - if (netif_msg_drv(np)) { - printk(KERN_INFO "%s: %s at %#08lx, ", - dev->name, natsemi_pci_info[chip_idx].name, ioaddr); - for (i = 0; i < ETH_ALEN-1; i++) - printk("%02x:", dev->dev_addr[i]); - printk("%02x, IRQ %d.\n", dev->dev_addr[i], irq); - } + /* get the initial settings from hardware */ + tmp = mdio_read(dev, MII_BMCR); + np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10; + np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF; + np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE; + np->advertising = mdio_read(dev, MII_ADVERTISE); - np->advertising = mdio_read(dev, 1, MII_ADVERTISE); - if ((readl(ioaddr + ChipConfig) & 0xe000) != 0xe000 + if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL && netif_msg_probe(np)) { - u32 chip_config = readl(ioaddr + ChipConfig); - printk(KERN_INFO "%s: Transceiver default autonegotiation %s " + printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s " "10%s %s duplex.\n", - dev->name, - chip_config & CfgAnegEnable ? + pci_name(np->pci_dev), + (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)? "enabled, advertise" : "disabled, force", - chip_config & CfgAneg100 ? "0" : "", - chip_config & CfgAnegFull ? "full" : "half"); + (np->advertising & + (ADVERTISE_100FULL|ADVERTISE_100HALF))? + "0" : "", + (np->advertising & + (ADVERTISE_100FULL|ADVERTISE_10FULL))? 
+ "full" : "half"); } if (netif_msg_probe(np)) printk(KERN_INFO - "%s: Transceiver status %#04x advertising %#04x.\n", - dev->name, mdio_read(dev, 1, MII_BMSR), + "natsemi %s: Transceiver status %#04x advertising %#04x.\n", + pci_name(np->pci_dev), mdio_read(dev, MII_BMSR), np->advertising); /* save the silicon revision for later querying */ np->srr = readl(ioaddr + SiliconRev); if (netif_msg_hw(np)) - printk(KERN_INFO "%s: silicon revision %#04x.\n", - dev->name, np->srr); + printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n", + pci_name(np->pci_dev), np->srr); + i = register_netdev(dev); + if (i) + goto err_register_netdev; + if (netif_msg_drv(np)) { + printk(KERN_INFO "natsemi %s: %s at %#08lx (%s), ", + dev->name, natsemi_pci_info[chip_idx].name, ioaddr, + pci_name(np->pci_dev)); + for (i = 0; i < ETH_ALEN-1; i++) + printk("%02x:", dev->dev_addr[i]); + printk("%02x, IRQ %d", dev->dev_addr[i], irq); + if (dev->if_port == PORT_TP) + printk(", port TP.\n"); + else + printk(", port MII, phy ad %d.\n", np->phy_addr_external); + } return 0; err_register_netdev: @@ -933,25 +1045,335 @@ /* MII transceiver control section. * The 83815 series has an internal transceiver, and we present the - * management registers as if they were MII connected. */ + * internal management registers as if they were MII connected. + * External Phy registers are referenced through the MII interface. + */ + +/* clock transitions >= 20ns (25MHz) + * One readl should be good to PCI @ 100MHz + */ +#define mii_delay(dev) readl(dev->base_addr + EECtrl) + +static int mii_getbit (struct net_device *dev) +{ + int data; + + writel(MII_ShiftClk, dev->base_addr + EECtrl); + data = readl(dev->base_addr + EECtrl); + writel(0, dev->base_addr + EECtrl); + mii_delay(dev); + return (data & MII_Data)? 1 : 0; +} + +static void mii_send_bits (struct net_device *dev, u32 data, int len) +{ + u32 i; + + for (i = (1 << (len-1)); i; i >>= 1) + { + u32 mdio_val = MII_Write | ((data & i)? 
MII_Data : 0); + writel(mdio_val, dev->base_addr + EECtrl); + mii_delay(dev); + writel(mdio_val | MII_ShiftClk, dev->base_addr + EECtrl); + mii_delay(dev); + } + writel(0, dev->base_addr + EECtrl); + mii_delay(dev); +} + +static int miiport_read(struct net_device *dev, int phy_id, int reg) +{ + u32 cmd; + int i; + u32 retval = 0; + + /* Ensure sync */ + mii_send_bits (dev, 0xffffffff, 32); + /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ + /* ST,OP = 0110'b for read operation */ + cmd = (0x06 << 10) | (phy_id << 5) | reg; + mii_send_bits (dev, cmd, 14); + /* Turnaround */ + if (mii_getbit (dev)) + return 0; + /* Read data */ + for (i = 0; i < 16; i++) { + retval <<= 1; + retval |= mii_getbit (dev); + } + /* End cycle */ + mii_getbit (dev); + return retval; +} -static int mdio_read(struct net_device *dev, int phy_id, int reg) +static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data) { - if (phy_id == 1 && reg < 32) - return readl(dev->base_addr+BasicControl+(reg<<2))&0xffff; + u32 cmd; + + /* Ensure sync */ + mii_send_bits (dev, 0xffffffff, 32); + /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ + /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */ + cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data; + mii_send_bits (dev, cmd, 32); + /* End cycle */ + mii_getbit (dev); +} + +static int mdio_read(struct net_device *dev, int reg) +{ + struct netdev_private *np = netdev_priv(dev); + + /* The 83815 series has two ports: + * - an internal transceiver + * - an external mii bus + */ + if (dev->if_port == PORT_TP) + return readw(dev->base_addr+BasicControl+(reg<<2)); else - return 0xffff; + return miiport_read(dev, np->phy_addr_external, reg); } -static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 data) +static void mdio_write(struct net_device *dev, int reg, u16 data) { - struct netdev_private *np = dev->priv; - if (phy_id == 1 && reg < 32) { + struct netdev_private *np = netdev_priv(dev); + + /* The 83815 series has an internal transceiver; handle separately */ + if (dev->if_port == PORT_TP) writew(data, dev->base_addr+BasicControl+(reg<<2)); - switch (reg) { - case MII_ADVERTISE: np->advertising = data; break; + else + miiport_write(dev, np->phy_addr_external, reg, data); +} + +static void init_phy_fixup(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + long ioaddr = dev->base_addr; + int i; + u32 cfg; + u16 tmp; + + /* restore stuff lost when power was out */ + tmp = mdio_read(dev, MII_BMCR); + if (np->autoneg == AUTONEG_ENABLE) { + /* renegotiate if something changed */ + if ((tmp & BMCR_ANENABLE) == 0 + || np->advertising != mdio_read(dev, MII_ADVERTISE)) + { + /* turn on autonegotiation and force negotiation */ + tmp |= (BMCR_ANENABLE | BMCR_ANRESTART); + mdio_write(dev, MII_ADVERTISE, np->advertising); + } + } else { + /* turn off auto negotiation, set speed and duplex mode */ + tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX); + if (np->speed == SPEED_100) + tmp |= BMCR_SPEED100; + if (np->duplex == DUPLEX_FULL) + tmp |= BMCR_FULLDPLX; + /* + * Note: there is no good way to inform the link partner + * that our capabilities changed. The user has to unplug + * and replug the network cable after some changes, e.g. + * after switching from 10HD, autoneg off to 100HD, + * autoneg off. 
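 + * E.g. forcing 100 Mbit full duplex merely ends up writing BMCR below + * with BMCR_SPEED100 and BMCR_FULLDPLX set and BMCR_ANENABLE cleared; + * nothing on the wire tells the partner to re-evaluate the link.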
+ */ + } + mdio_write(dev, MII_BMCR, tmp); + readl(dev->base_addr + ChipConfig); + udelay(1); + + /* find out what phy this is */ + np->mii = (mdio_read(dev, MII_PHYSID1) << 16) + + mdio_read(dev, MII_PHYSID2); + + /* handle external phys here */ + switch (np->mii) { + case PHYID_AM79C874: + /* phy specific configuration for fibre/tp operation */ + tmp = mdio_read(dev, MII_MCTRL); + tmp &= ~(MII_FX_SEL | MII_EN_SCRM); + if (dev->if_port == PORT_FIBRE) + tmp |= MII_FX_SEL; + else + tmp |= MII_EN_SCRM; + mdio_write(dev, MII_MCTRL, tmp); + break; + default: + break; + } + cfg = readl(dev->base_addr + ChipConfig); + if (cfg & CfgExtPhy) + return; + + /* On page 78 of the spec, they recommend some settings for "optimum + performance" to be done in sequence. These settings optimize some + of the 100Mbit autodetection circuitry. They say we only want to + do this for rev C of the chip, but engineers at NSC (Bradley + Kennedy) recommend always setting them. If you don't, you get + errors on some autonegotiations that make the device unusable. + + It seems that the DSP needs a few usec to reinitialize after + the start of the phy. Just retry writing these values until they + stick. + */ + for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { + int dspcfg; + writew(1, ioaddr + PGSEL); + writew(PMDCSR_VAL, ioaddr + PMDCSR); + writew(TSTDAT_VAL, ioaddr + TSTDAT); + np->dspcfg = DSPCFG_VAL; + writew(np->dspcfg, ioaddr + DSPCFG); + writew(SDCFG_VAL, ioaddr + SDCFG); + writew(0, ioaddr + PGSEL); + readl(ioaddr + ChipConfig); + udelay(10); + + writew(1, ioaddr + PGSEL); + dspcfg = readw(ioaddr + DSPCFG); + writew(0, ioaddr + PGSEL); + if (np->dspcfg == dspcfg) + break; + } + + if (netif_msg_link(np)) { + if (i==NATSEMI_HW_TIMEOUT) { + printk(KERN_INFO + "%s: DSPCFG mismatch after retrying for %d usec.\n", + dev->name, i*10); + } else { + printk(KERN_INFO + "%s: DSPCFG accepted after %d usec.\n", + dev->name, i*10); } } + /* + * Enable PHY Specific event based interrupts. Link state change + * and Auto-Negotiation Completion are among the affected. + * Read the intr status to clear it (needed for wake events). + */ + readw(ioaddr + MIntrStatus); + writew(MICRIntEn, ioaddr + MIntrCtrl); +} + +static int switch_port_external(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + u32 cfg; + + cfg = readl(dev->base_addr + ChipConfig); + if (cfg & CfgExtPhy) + return 0; + + if (netif_msg_link(np)) { + printk(KERN_INFO "%s: switching to external transceiver.\n", + dev->name); + } + + /* 1) switch back to external phy */ + writel(cfg | (CfgExtPhy | CfgPhyDis), dev->base_addr + ChipConfig); + readl(dev->base_addr + ChipConfig); + udelay(1); + + /* 2) reset the external phy: */ + /* resetting the external PHY has been known to cause a hub supplying + * power over Ethernet to kill the power. We don't want to kill + * power to this computer, so we avoid resetting the phy. + */ + + /* 3) reinit the phy fixup, it got lost during power down. 
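 + * (init_phy_fixup() rewrites BMCR, MII_ADVERTISE and the phy specific + * registers, e.g. MII_MCTRL on the Am79C874, from the values cached in + * struct netdev_private, so earlier settings survive the power cycle.)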
*/ + move_int_phy(dev, np->phy_addr_external); + init_phy_fixup(dev); + + return 1; +} + +static int switch_port_internal(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int i; + u32 cfg; + u16 bmcr; + + cfg = readl(dev->base_addr + ChipConfig); + if (!(cfg & CfgExtPhy)) + return 0; + + if (netif_msg_link(np)) { + printk(KERN_INFO "%s: switching to internal transceiver.\n", + dev->name); + } + /* 1) switch back to internal phy: */ + cfg = cfg & ~(CfgExtPhy | CfgPhyDis); + writel(cfg, dev->base_addr + ChipConfig); + readl(dev->base_addr + ChipConfig); + udelay(1); + + /* 2) reset the internal phy: */ + bmcr = readw(dev->base_addr+BasicControl+(MII_BMCR<<2)); + writel(bmcr | BMCR_RESET, dev->base_addr+BasicControl+(MII_BMCR<<2)); + readl(dev->base_addr + ChipConfig); + udelay(10); + for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { + bmcr = readw(dev->base_addr+BasicControl+(MII_BMCR<<2)); + if (!(bmcr & BMCR_RESET)) + break; + udelay(10); + } + if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) { + printk(KERN_INFO + "%s: phy reset did not complete in %d usec.\n", + dev->name, i*10); + } + /* 3) reinit the phy fixup, it got lost during power down. */ + init_phy_fixup(dev); + + return 1; +} + +/* Scan for a PHY on the external mii bus. + * There are two tricky points: + * - Do not scan while the internal phy is enabled. The internal phy will + * crash: e.g. reads from the DSPCFG register will return odd values and + * the nasty random phy reset code will reset the nic every few seconds. + * - The internal phy must be moved around, since an external phy could + * have the same address as the internal phy. + */ +static int find_mii(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int tmp; + int i; + int did_switch; + + /* Switch to external phy */ + did_switch = switch_port_external(dev); + + /* Scan the possible phy addresses: + * + * PHY address 0 means that the phy is in isolate mode. Not yet + * supported due to lack of test hardware. User space should + * handle it through ethtool. + */ + for (i = 1; i <= 31; i++) { + move_int_phy(dev, i); + tmp = miiport_read(dev, i, MII_BMSR); + if (tmp != 0xffff && tmp != 0x0000) { + /* found something! */ + np->mii = (mdio_read(dev, MII_PHYSID1) << 16) + + mdio_read(dev, MII_PHYSID2); + if (netif_msg_probe(np)) { + printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n", + pci_name(np->pci_dev), np->mii, i); + } + break; + } + } + /* And switch back to internal phy: */ + if (did_switch) + switch_port_internal(dev); + return i; } /* CFG bits [13:16] [18:23] */ @@ -969,7 +1391,7 @@ u32 rfcr; u16 pmatch[3]; u16 sopass[3]; - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); /* * Resetting the chip causes some registers to be lost. 
@@ -1013,6 +1435,11 @@ /* restore CFG */ cfg |= readl(dev->base_addr + ChipConfig) & ~CFG_RESET_SAVE; + /* turn on external phy if it was selected */ + if (dev->if_port == PORT_TP) + cfg &= ~(CfgExtPhy | CfgPhyDis); + else + cfg |= (CfgExtPhy | CfgPhyDis); writel(cfg, dev->base_addr + ChipConfig); /* restore WCSR */ wcsr |= readl(dev->base_addr + WOLCmd) & ~WCSR_RESET_SAVE; @@ -1034,7 +1461,7 @@ static void natsemi_reload_eeprom(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); int i; writel(EepromReload, dev->base_addr + PCIBusCfg); @@ -1044,18 +1471,18 @@ break; } if (i==NATSEMI_HW_TIMEOUT) { - printk(KERN_WARNING "%s: EEPROM did not reload in %d usec.\n", - dev->name, i*50); + printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n", + pci_name(np->pci_dev), i*50); } else if (netif_msg_hw(np)) { - printk(KERN_DEBUG "%s: EEPROM reloaded in %d usec.\n", - dev->name, i*50); + printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n", + pci_name(np->pci_dev), i*50); } } static void natsemi_stop_rxtx(struct net_device *dev) { long ioaddr = dev->base_addr; - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); int i; writel(RxOff | TxOff, ioaddr + ChipCmd); @@ -1075,7 +1502,7 @@ static int netdev_open(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); long ioaddr = dev->base_addr; int i; @@ -1124,7 +1551,10 @@ static void do_cable_magic(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); + + if (dev->if_port != PORT_TP) + return; if (np->srr >= SRR_DP83816_A5) return; @@ -1149,7 +1579,7 @@ * (these values all come from National) */ if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); /* the bug has been triggered - fix the coefficient */ writew(TSTDAT_FIXED, dev->base_addr + TSTDAT); @@ -1165,7 +1595,10 @@ static void undo_cable_magic(struct net_device *dev) { u16 data; - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); + + if (dev->if_port != PORT_TP) + return; if (np->srr >= SRR_DP83816_A5) return; @@ -1180,12 +1613,19 @@ static void check_link(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); long ioaddr = dev->base_addr; int duplex; - int chipcfg = readl(ioaddr + ChipConfig); + u16 bmsr; + + /* The link status field is latched: it remains low after a temporary + * link failure until it's read. We need the current link status, + * thus read twice. + */ + mdio_read(dev, MII_BMSR); + bmsr = mdio_read(dev, MII_BMSR); - if (!(chipcfg & CfgLink)) { + if (!(bmsr & BMSR_LSTATUS)) { if (netif_carrier_ok(dev)) { if (netif_msg_link(np)) printk(KERN_NOTICE "%s: link down.\n", @@ -1202,7 +1642,16 @@ do_cable_magic(dev); } - duplex = np->full_duplex || (chipcfg & CfgFullDuplex ? 
1 : 0); + duplex = np->full_duplex; + if (!duplex) { + if (bmsr & BMSR_ANEGCOMPLETE) { + int tmp = mii_nway_result( + np->advertising & mdio_read(dev, MII_LPA)); + if (tmp == LPA_100FULL || tmp == LPA_10FULL) + duplex = 1; + } else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX) + duplex = 1; + } /* if duplex is set then bit 28 must be set, too */ if (duplex ^ !!(np->rx_config & RxAcceptTx)) { @@ -1225,42 +1674,10 @@ static void init_registers(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); long ioaddr = dev->base_addr; - int i; - - for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { - if (readl(dev->base_addr + ChipConfig) & CfgAnegDone) - break; - udelay(10); - } - if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) { - printk(KERN_INFO - "%s: autonegotiation did not complete in %d usec.\n", - dev->name, i*10); - } - /* On page 78 of the spec, they recommend some settings for "optimum - performance" to be done in sequence. These settings optimize some - of the 100Mbit autodetection circuitry. They say we only want to - do this for rev C of the chip, but engineers at NSC (Bradley - Kennedy) recommends always setting them. If you don't, you get - errors on some autonegotiations that make the device unusable. - */ - writew(1, ioaddr + PGSEL); - writew(PMDCSR_VAL, ioaddr + PMDCSR); - writew(TSTDAT_VAL, ioaddr + TSTDAT); - writew(DSPCFG_VAL, ioaddr + DSPCFG); - writew(SDCFG_VAL, ioaddr + SDCFG); - writew(0, ioaddr + PGSEL); - np->dspcfg = DSPCFG_VAL; - - /* Enable PHY Specific event based interrupts. Link state change - and Auto-Negotiation Completion are among the affected. - Read the intr status to clear it (needed for wake events). - */ - readw(ioaddr + MIntrStatus); - writew(MICRIntEn, ioaddr + MIntrCtrl); + init_phy_fixup(dev); /* clear any interrupts that are pending, such as wake events */ readl(ioaddr + IntrStatus); @@ -1283,13 +1700,18 @@ * ECRETRY=1 * ATP=1 */ - np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 | (0x1002); + np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 | + TX_FLTH_VAL | TX_DRTH_VAL_START; writel(np->tx_config, ioaddr + TxConfig); /* DRTH 0x10: start copying to memory if 128 bytes are in the fifo * MXDMA 0: up to 256 byte bursts */ - np->rx_config = RxMxdma_256 | 0x20; + np->rx_config = RxMxdma_256 | RX_DRTH_VAL; + /* if receive ring now has bigger buffers than normal, enable jumbo */ + if (np->rx_buf_sz > NATSEMI_LONGPKT) + np->rx_config |= RxAcceptLong; + writel(np->rx_config, ioaddr + RxConfig); /* Disable PME: @@ -1331,10 +1753,8 @@ static void netdev_timer(unsigned long data) { struct net_device *dev = (struct net_device *)data; - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); int next_tick = 5*HZ; - long ioaddr = dev->base_addr; - u16 dspcfg; if (netif_msg_timer(np)) { /* DO NOT read the IntrStatus register, @@ -1344,33 +1764,41 @@ dev->name); } - spin_lock_irq(&np->lock); + if (dev->if_port == PORT_TP) { + long ioaddr = dev->base_addr; + u16 dspcfg; - /* check for a nasty random phy-reset - use dspcfg as a flag */ - writew(1, ioaddr+PGSEL); - dspcfg = readw(ioaddr+DSPCFG); - writew(0, ioaddr+PGSEL); - if (dspcfg != np->dspcfg) { - if (!netif_queue_stopped(dev)) { - spin_unlock_irq(&np->lock); - if (netif_msg_hw(np)) - printk(KERN_NOTICE "%s: possible phy reset: " - "re-initializing\n", dev->name); - disable_irq(dev->irq); - spin_lock_irq(&np->lock); - natsemi_stop_rxtx(dev); - dump_ring(dev); - reinit_ring(dev); - init_registers(dev); - spin_unlock_irq(&np->lock); - enable_irq(dev->irq); + 
spin_lock_irq(&np->lock); + /* check for a nasty random phy-reset - use dspcfg as a flag */ + writew(1, ioaddr+PGSEL); + dspcfg = readw(ioaddr+DSPCFG); + writew(0, ioaddr+PGSEL); + if (dspcfg != np->dspcfg) { + if (!netif_queue_stopped(dev)) { + spin_unlock_irq(&np->lock); + if (netif_msg_hw(np)) + printk(KERN_NOTICE "%s: possible phy reset: " + "re-initializing\n", dev->name); + disable_irq(dev->irq); + spin_lock_irq(&np->lock); + natsemi_stop_rxtx(dev); + dump_ring(dev); + reinit_ring(dev); + init_registers(dev); + spin_unlock_irq(&np->lock); + enable_irq(dev->irq); + } else { + /* hurry back */ + next_tick = HZ; + spin_unlock_irq(&np->lock); + } } else { - /* hurry back */ - next_tick = HZ; + /* init_registers() calls check_link() for the above case */ + check_link(dev); spin_unlock_irq(&np->lock); } } else { - /* init_registers() calls check_link() for the above case */ + spin_lock_irq(&np->lock); check_link(dev); spin_unlock_irq(&np->lock); } @@ -1390,7 +1818,7 @@ static void dump_ring(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); if (netif_msg_pktdata(np)) { int i; @@ -1413,7 +1841,7 @@ static void tx_timeout(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); long ioaddr = dev->base_addr; disable_irq(dev->irq); @@ -1444,7 +1872,7 @@ static int alloc_ring(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); np->rx_ring = pci_alloc_consistent(np->pci_dev, sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE), &np->ring_dma); @@ -1456,14 +1884,14 @@ static void refill_rx(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); /* Refill the Rx ring buffers. */ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { struct sk_buff *skb; int entry = np->dirty_rx % RX_RING_SIZE; if (np->rx_skbuff[entry] == NULL) { - unsigned int buflen = np->rx_buf_sz + RX_OFFSET; + unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING; skb = dev_alloc_skb(buflen); np->rx_skbuff[entry] = skb; if (skb == NULL) @@ -1482,10 +1910,19 @@ } } +static void set_bufsize(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + if (dev->mtu <= ETH_DATA_LEN) + np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS; + else + np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS; +} + /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ static void init_ring(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); int i; /* 1) TX ring */ @@ -1501,8 +1938,9 @@ /* 2) RX ring */ np->dirty_rx = 0; np->cur_rx = RX_RING_SIZE; - np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); np->oom = 0; + set_bufsize(dev); + np->rx_head_desc = &np->rx_ring[0]; /* Please be careful before changing this loop - at least gcc-2.95.1 @@ -1522,7 +1960,7 @@ static void drain_tx(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); int i; for (i = 0; i < TX_RING_SIZE; i++) { @@ -1537,10 +1975,10 @@ } } -static void drain_ring(struct net_device *dev) +static void drain_rx(struct net_device *dev) { - struct netdev_private *np = dev->priv; - unsigned int buflen = np->rx_buf_sz + RX_OFFSET; + struct netdev_private *np = netdev_priv(dev); + unsigned int buflen = np->rx_buf_sz; int i; /* Free all the skbuffs in the Rx queue. 
*/ @@ -1555,28 +1993,27 @@ } np->rx_skbuff[i] = NULL; } +} + +static void drain_ring(struct net_device *dev) +{ + drain_rx(dev); drain_tx(dev); } static void free_ring(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); pci_free_consistent(np->pci_dev, sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE), np->rx_ring, np->ring_dma); } -static void reinit_ring(struct net_device *dev) +static void reinit_rx(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); int i; - /* drain TX ring */ - drain_tx(dev); - np->dirty_tx = np->cur_tx = 0; - for (i=0;i<TX_RING_SIZE;i++) - np->tx_ring[i].cmd_status = 0; - /* RX Ring */ np->dirty_rx = 0; np->cur_rx = RX_RING_SIZE; @@ -1588,9 +2025,23 @@ refill_rx(dev); } +static void reinit_ring(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int i; + + /* drain TX ring */ + drain_tx(dev); + np->dirty_tx = np->cur_tx = 0; + for (i=0;i<TX_RING_SIZE;i++) + np->tx_ring[i].cmd_status = 0; + + reinit_rx(dev); +} + static int start_tx(struct sk_buff *skb, struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); unsigned entry; /* Note: Ordering is important here, set the field with the @@ -1637,7 +2088,7 @@ static void netdev_tx_done(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { int entry = np->dirty_tx % TX_RING_SIZE; @@ -1683,7 +2134,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs) { struct net_device *dev = dev_instance; - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); long ioaddr = dev->base_addr; int boguscnt = max_interrupt_work; unsigned int handled = 0; @@ -1741,20 +2192,22 @@ for clarity and better register allocation. */ static void netdev_rx(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); int entry = np->cur_rx % RX_RING_SIZE; int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx; s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status); - unsigned int buflen = np->rx_buf_sz + RX_OFFSET; + unsigned int buflen = np->rx_buf_sz; /* If the driver owns the next entry it's a new packet. Send it up. */ while (desc_status < 0) { /* e.g. & DescOwn */ + int pkt_len; if (netif_msg_rx_status(np)) printk(KERN_DEBUG " netdev_rx() entry %d status was %#08x.\n", entry, desc_status); if (--boguscnt < 0) break; + pkt_len = (desc_status & DescSizeMask) - 4; if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ if (desc_status & DescMore) { if (netif_msg_rx_err(np)) @@ -1777,10 +2230,14 @@ if (desc_status & DescRxCRC) np->stats.rx_crc_errors++; } + } else if (pkt_len > np->rx_buf_sz) { + /* if this is the tail of a double buffer + * packet, we've already counted the error + * on the first part. Ignore the second half. + */ } else { struct sk_buff *skb; /* Omit CRC size. */ - int pkt_len = (desc_status & DescSizeMask) - 4; /* Check if the packet is long enough to accept * without copying to a minimally-sized skbuff. 
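 * (sketch of the idea: below rx_copybreak the packet is copied into a * freshly allocated small skb and the ring buffer is reused immediately; * at or above it, the full sized ring skb itself is passed up the stack.)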
*/ if (pkt_len < rx_copybreak @@ -1826,19 +2283,18 @@ static void netdev_error(struct net_device *dev, int intr_status) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); long ioaddr = dev->base_addr; spin_lock(&np->lock); if (intr_status & LinkChange) { - u16 adv = mdio_read(dev, 1, MII_ADVERTISE); - u16 lpa = mdio_read(dev, 1, MII_LPA); - if (mdio_read(dev, 1, MII_BMCR) & BMCR_ANENABLE + u16 lpa = mdio_read(dev, MII_LPA); + if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE && netif_msg_link(np)) { printk(KERN_INFO "%s: Autonegotiation advertising" " %#04x partner %#04x.\n", dev->name, - adv, lpa); + np->advertising, lpa); } /* read MII int status to clear the flag */ @@ -1849,12 +2305,18 @@ __get_stats(dev); } if (intr_status & IntrTxUnderrun) { - if ((np->tx_config & TxDrthMask) < 62) - np->tx_config += 2; - if (netif_msg_tx_err(np)) - printk(KERN_NOTICE - "%s: increased Tx threshold, txcfg %#08x.\n", - dev->name, np->tx_config); + if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) { + np->tx_config += TX_DRTH_VAL_INC; + if (netif_msg_tx_err(np)) + printk(KERN_NOTICE + "%s: increased tx threshold, txcfg %#08x.\n", + dev->name, np->tx_config); + } else { + if (netif_msg_tx_err(np)) + printk(KERN_NOTICE + "%s: tx underrun with maximum tx threshold, txcfg %#08x.\n", + dev->name, np->tx_config); + } writel(np->tx_config, ioaddr + TxConfig); } if (intr_status & WOLPkt && netif_msg_wol(np)) { @@ -1882,7 +2344,7 @@ static void __get_stats(struct net_device *dev) { long ioaddr = dev->base_addr; - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); /* The chip only needs to report frames silently dropped. */ np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs); @@ -1891,7 +2353,7 @@ static struct net_device_stats *get_stats(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); /* The chip only needs to report frames silently dropped. 
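 * (here that means get_stats() just folds hardware counters such as * RxCRCErrs into np->stats via __get_stats() under np->lock; everything * else is counted in software as packets are processed.)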
*/ spin_lock_irq(&np->lock); @@ -1906,7 +2368,7 @@ static void __set_rx_mode(struct net_device *dev) { long ioaddr = dev->base_addr; - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); u8 mc_filter[64]; /* Multicast hash filter */ u32 rx_mode; @@ -1941,9 +2403,39 @@ np->cur_rx_mode = rx_mode; } +static int natsemi_change_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS) + return -EINVAL; + + dev->mtu = new_mtu; + + /* synchronized against open : rtnl_lock() held by caller */ + if (netif_running(dev)) { + struct netdev_private *np = netdev_priv(dev); + long ioaddr = dev->base_addr; + + disable_irq(dev->irq); + spin_lock(&np->lock); + /* stop engines */ + natsemi_stop_rxtx(dev); + /* drain rx queue */ + drain_rx(dev); + /* change buffers */ + set_bufsize(dev); + reinit_rx(dev); + writel(np->ring_dma, ioaddr + RxRingPtr); + /* restart engines */ + writel(RxOn | TxOn, ioaddr + ChipCmd); + spin_unlock(&np->lock); + enable_irq(dev->irq); + } + return 0; +} + static void set_rx_mode(struct net_device *dev) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); if (!np->hands_off) __set_rx_mode(dev); @@ -1952,7 +2444,7 @@ static int netdev_ethtool_ioctl(struct net_device *dev, void __user *useraddr) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); u32 cmd; if (get_user(cmd, (u32 __user *)useraddr)) @@ -2066,10 +2558,10 @@ int tmp; int r = -EINVAL; /* if autoneg is off, it's an error */ - tmp = mdio_read(dev, 1, MII_BMCR); + tmp = mdio_read(dev, MII_BMCR); if (tmp & BMCR_ANENABLE) { tmp |= (BMCR_ANRESTART); - mdio_write(dev, 1, MII_BMCR, tmp); + mdio_write(dev, MII_BMCR, tmp); r = 0; } return r; @@ -2078,8 +2570,8 @@ case ETHTOOL_GLINK: { struct ethtool_value edata = {ETHTOOL_GLINK}; /* LSTATUS is latched low until a read - so read twice */ - mdio_read(dev, 1, MII_BMSR); - edata.data = (mdio_read(dev, 1, MII_BMSR)&BMSR_LSTATUS) ? 1:0; + mdio_read(dev, MII_BMSR); + edata.data = (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 
1:0; if (copy_to_user(useraddr, &edata, sizeof(edata))) return -EFAULT; return 0; @@ -2123,7 +2615,7 @@ static int netdev_set_wol(struct net_device *dev, u32 newval) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); u32 data = readl(dev->base_addr + WOLCmd) & ~WakeOptsSummary; /* translate to bitmasks this chip understands */ @@ -2152,7 +2644,7 @@ static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); u32 regval = readl(dev->base_addr + WOLCmd); *supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST @@ -2187,7 +2679,7 @@ static int netdev_set_sopass(struct net_device *dev, u8 *newval) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); u16 *sval = (u16 *)newval; u32 addr; @@ -2218,7 +2710,7 @@ static int netdev_get_sopass(struct net_device *dev, u8 *data) { - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); u16 *sval = (u16 *)data; u32 addr; @@ -2246,56 +2738,75 @@ static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) { + struct netdev_private *np = netdev_priv(dev); u32 tmp; - ecmd->supported = - (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); - - /* only supports twisted-pair or MII */ - tmp = readl(dev->base_addr + ChipConfig); - if (tmp & CfgExtPhy) - ecmd->port = PORT_MII; - else - ecmd->port = PORT_TP; - - /* only supports internal transceiver */ - ecmd->transceiver = XCVR_INTERNAL; - - /* not sure what this is for */ - ecmd->phy_address = readw(dev->base_addr + PhyCtrl) & PhyAddrMask; - - ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; - tmp = mdio_read(dev, 1, MII_ADVERTISE); - if (tmp & ADVERTISE_10HALF) + ecmd->port = dev->if_port; + ecmd->speed = np->speed; + ecmd->duplex = np->duplex; + ecmd->autoneg = np->autoneg; + ecmd->advertising = 0; + if (np->advertising & ADVERTISE_10HALF) ecmd->advertising |= ADVERTISED_10baseT_Half; - if (tmp & ADVERTISE_10FULL) + if (np->advertising & ADVERTISE_10FULL) ecmd->advertising |= ADVERTISED_10baseT_Full; - if (tmp & ADVERTISE_100HALF) + if (np->advertising & ADVERTISE_100HALF) ecmd->advertising |= ADVERTISED_100baseT_Half; - if (tmp & ADVERTISE_100FULL) + if (np->advertising & ADVERTISE_100FULL) ecmd->advertising |= ADVERTISED_100baseT_Full; + ecmd->supported = (SUPPORTED_Autoneg | + SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE); + ecmd->phy_address = np->phy_addr_external; + /* + * We intentionally report the phy address of the external + * phy, even if the internal phy is used. This is necessary + * to work around a deficiency of the ethtool interface: + * It's only possible to query the settings of the active + * port. Therefore + * # ethtool -s ethX port mii + * actually sends an ioctl to switch to port mii with the + * settings that are used for the current active port. + * If we reported a different phy address in this + * command, then + * # ethtool -s ethX port tp;ethtool -s ethX port mii + * would unintentionally change the phy address. + * + * Fortunately the phy address doesn't matter with the + * internal phy... 
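 + * (the internal transceiver is reached through the mapped BasicControl + * registers rather than over the mii bus, so whatever address is + * reported for it is purely cosmetic.)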
+ */ - tmp = mdio_read(dev, 1, MII_BMCR); - if (tmp & BMCR_ANENABLE) { - ecmd->advertising |= ADVERTISED_Autoneg; - ecmd->autoneg = AUTONEG_ENABLE; - } else { - ecmd->autoneg = AUTONEG_DISABLE; - } - - tmp = readl(dev->base_addr + ChipConfig); - if (tmp & CfgSpeed100) { - ecmd->speed = SPEED_100; - } else { - ecmd->speed = SPEED_10; + /* set information based on active port type */ + switch (ecmd->port) { + default: + case PORT_TP: + ecmd->advertising |= ADVERTISED_TP; + ecmd->transceiver = XCVR_INTERNAL; + break; + case PORT_MII: + ecmd->advertising |= ADVERTISED_MII; + ecmd->transceiver = XCVR_EXTERNAL; + break; + case PORT_FIBRE: + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + break; } - if (tmp & CfgFullDuplex) { - ecmd->duplex = DUPLEX_FULL; - } else { - ecmd->duplex = DUPLEX_HALF; + /* if autonegotiation is on, try to return the active speed/duplex */ + if (ecmd->autoneg == AUTONEG_ENABLE) { + ecmd->advertising |= ADVERTISED_Autoneg; + tmp = mii_nway_result( + np->advertising & mdio_read(dev, MII_LPA)); + if (tmp == LPA_100FULL || tmp == LPA_100HALF) + ecmd->speed = SPEED_100; + else + ecmd->speed = SPEED_10; + if (tmp == LPA_100FULL || tmp == LPA_10FULL) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; } /* ignore maxtxpkt, maxrxpkt for now */ @@ -2305,39 +2816,75 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) { - struct netdev_private *np = dev->priv; - u32 tmp; + struct netdev_private *np = netdev_priv(dev); - if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) + if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE) return -EINVAL; - if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL) return -EINVAL; - if (ecmd->port != PORT_TP && ecmd->port != PORT_MII) - return -EINVAL; - if (ecmd->transceiver != XCVR_INTERNAL) - return -EINVAL; - if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) + if (ecmd->autoneg == AUTONEG_ENABLE) { + if ((ecmd->advertising & (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full)) == 0) { + return -EINVAL; + } + } else if (ecmd->autoneg == AUTONEG_DISABLE) { + if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) + return -EINVAL; + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + return -EINVAL; + } else { return -EINVAL; - /* ignore phy_address, maxtxpkt, maxrxpkt for now */ + } + + /* + * maxtxpkt, maxrxpkt: ignored for now. + * + * transceiver: + * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always + * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and + * selects based on ecmd->port. + * + * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre + * phys that are connected to the mii bus. It's used to apply fibre + * specific updates. + */ /* WHEW! 
now let's bang some bits */ - tmp = mdio_read(dev, 1, MII_BMCR); - if (ecmd->autoneg == AUTONEG_ENABLE) { - /* turn on autonegotiation */ - tmp |= BMCR_ANENABLE; - np->advertising = mdio_read(dev, 1, MII_ADVERTISE); + /* save the parms */ + dev->if_port = ecmd->port; + np->autoneg = ecmd->autoneg; + np->phy_addr_external = ecmd->phy_address & PhyAddrMask; + if (np->autoneg == AUTONEG_ENABLE) { + /* advertise only what has been requested */ + np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); + if (ecmd->advertising & ADVERTISED_10baseT_Half) + np->advertising |= ADVERTISE_10HALF; + if (ecmd->advertising & ADVERTISED_10baseT_Full) + np->advertising |= ADVERTISE_10FULL; + if (ecmd->advertising & ADVERTISED_100baseT_Half) + np->advertising |= ADVERTISE_100HALF; + if (ecmd->advertising & ADVERTISED_100baseT_Full) + np->advertising |= ADVERTISE_100FULL; } else { - /* turn off auto negotiation, set speed and duplexity */ - tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX); - if (ecmd->speed == SPEED_100) - tmp |= BMCR_SPEED100; - if (ecmd->duplex == DUPLEX_FULL) - tmp |= BMCR_FULLDPLX; - else + np->speed = ecmd->speed; + np->duplex = ecmd->duplex; + /* user overriding the initial full duplex parm? */ + if (np->duplex == DUPLEX_HALF) np->full_duplex = 0; } - mdio_write(dev, 1, MII_BMCR, tmp); + + /* get the right phy enabled */ + if (ecmd->port == PORT_TP) + switch_port_internal(dev); + else + switch_port_external(dev); + + /* set parms and see how this affected our link status */ + init_phy_fixup(dev); + check_link(dev); return 0; } @@ -2348,11 +2895,15 @@ u32 rfcr; u32 *rbuf = (u32 *)buf; - /* read all of page 0 of registers */ - for (i = 0; i < NATSEMI_PG0_NREGS; i++) { + /* read non-mii page 0 of registers */ + for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) { rbuf[i] = readl(dev->base_addr + i*4); } + /* read current mii registers */ + for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++) + rbuf[i] = mdio_read(dev, i & 0x1f); + /* read only the 'magic' registers from page 1 */ writew(1, dev->base_addr + PGSEL); rbuf[i++] = readw(dev->base_addr + PMDCSR); @@ -2407,27 +2958,56 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct mii_ioctl_data *data = if_mii(rq); + struct netdev_private *np = netdev_priv(dev); switch(cmd) { case SIOCETHTOOL: return netdev_ethtool_ioctl(dev, rq->ifr_data); case SIOCGMIIPHY: /* Get address of MII PHY in use. */ case SIOCDEVPRIVATE: /* for binary compat, remove in 2.5 */ - data->phy_id = 1; + data->phy_id = np->phy_addr_external; /* Fall Through */ case SIOCGMIIREG: /* Read MII PHY register. */ case SIOCDEVPRIVATE+1: /* for binary compat, remove in 2.5 */ - data->val_out = mdio_read(dev, data->phy_id & 0x1f, - data->reg_num & 0x1f); + /* The phy_id is not enough to uniquely identify + * the intended target. Therefore the command is sent to + * the given mii on the current port. + */ + if (dev->if_port == PORT_TP) { + if ((data->phy_id & 0x1f) == np->phy_addr_external) + data->val_out = mdio_read(dev, + data->reg_num & 0x1f); + else + data->val_out = 0; + } else { + move_int_phy(dev, data->phy_id & 0x1f); + data->val_out = miiport_read(dev, data->phy_id & 0x1f, + data->reg_num & 0x1f); + } return 0; case SIOCSMIIREG: /* Write MII PHY register. 
*/ case SIOCDEVPRIVATE+2: /* for binary compat, remove in 2.5 */ if (!capable(CAP_NET_ADMIN)) return -EPERM; - mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, - data->val_in); + if (dev->if_port == PORT_TP) { + if ((data->phy_id & 0x1f) == np->phy_addr_external) { + if ((data->reg_num & 0x1f) == MII_ADVERTISE) + np->advertising = data->val_in; + mdio_write(dev, data->reg_num & 0x1f, + data->val_in); + } + } else { + if ((data->phy_id & 0x1f) == np->phy_addr_external) { + if ((data->reg_num & 0x1f) == MII_ADVERTISE) + np->advertising = data->val_in; + } + move_int_phy(dev, data->phy_id & 0x1f); + miiport_write(dev, data->phy_id & 0x1f, + data->reg_num & 0x1f, + data->val_in); + } return 0; default: return -EOPNOTSUPP; @@ -2437,7 +3017,7 @@ static void enable_wol_mode(struct net_device *dev, int enable_intr) { long ioaddr = dev->base_addr; - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); if (netif_msg_wol(np)) printk(KERN_INFO "%s: remaining active for wake-on-lan\n", @@ -2470,7 +3050,7 @@ static int netdev_close(struct net_device *dev) { long ioaddr = dev->base_addr; - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); if (netif_msg_ifdown(np)) printk(KERN_DEBUG @@ -2582,7 +3162,7 @@ static int natsemi_suspend (struct pci_dev *pdev, u32 state) { struct net_device *dev = pci_get_drvdata (pdev); - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); long ioaddr = dev->base_addr; rtnl_lock(); @@ -2629,7 +3209,7 @@ static int natsemi_resume (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); - struct netdev_private *np = dev->priv; + struct netdev_private *np = netdev_priv(dev); rtnl_lock(); if (netif_device_present(dev)) diff -Nru a/drivers/net/r8169.c b/drivers/net/r8169.c --- a/drivers/net/r8169.c 2004-07-13 12:46:19 -07:00 +++ b/drivers/net/r8169.c 2004-07-13 12:46:19 -07:00 @@ -7,7 +7,7 @@ Feb 4 2002 - created initially by ShuChen . May 20 2002 - Add link status force-mode and TBI mode support. ========================================================================= - 1. The media can be forced in 5 modes. + 1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes. Command: 'insmod r8169 media = SET_MEDIA' Ex: 'insmod r8169 media = 0x04' will force PHY to operate in 100Mbps Half-duplex. @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -64,6 +65,14 @@ #define dprintk(fmt, args...) 
do {} while (0) #endif /* RTL8169_DEBUG */ +#ifdef CONFIG_R8169_NAPI +#define rtl8169_rx_skb netif_receive_skb +#define rtl8169_rx_quota(count, quota) min(count, quota) +#else +#define rtl8169_rx_skb netif_rx +#define rtl8169_rx_quota(count, quota) count +#endif + /* media options */ #define MAX_UNITS 8 static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 }; @@ -90,15 +99,16 @@ #define RxPacketMaxSize 0x0800 /* Maximum size supported is 16K-1 */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ +#define R8169_NAPI_WEIGHT 64 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ -#define NUM_RX_DESC 64 /* Number of Rx descriptor registers */ +#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ #define RX_BUF_SIZE 1536 /* Rx Buffer size */ #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) #define RTL_MIN_IO_SIZE 0x80 #define RTL8169_TX_TIMEOUT (6*HZ) -#define RTL8169_PHY_TIMEOUT (HZ) +#define RTL8169_PHY_TIMEOUT (10*HZ) /* write/read MMIO register */ #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) @@ -194,7 +204,7 @@ SWInt = 0x0100, TxDescUnavail = 0x80, RxFIFOOver = 0x40, - RxUnderrun = 0x20, + LinkChg = 0x20, RxOverflow = 0x10, TxErr = 0x08, TxOK = 0x04, @@ -233,6 +243,14 @@ TxInterFrameGapShift = 24, TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + /* TBICSR p.28 */ + TBIReset = 0x80000000, + TBILoopback = 0x40000000, + TBINwEnable = 0x20000000, + TBINwRestart = 0x10000000, + TBILinkOk = 0x02000000, + TBINwComplete = 0x01000000, + /* CPlusCmd p.31 */ RxVlan = (1 << 6), RxChkSum = (1 << 5), @@ -306,10 +324,10 @@ }; struct rtl8169_private { - void *mmio_addr; /* memory map physical address */ + void *mmio_addr; /* memory map physical address */ struct pci_dev *pci_dev; /* Index of PCI device */ struct net_device_stats stats; /* statistics of net device */ - spinlock_t lock; /* spin lock flag */ + spinlock_t lock; /* spin lock flag */ int chipset; int mac_version; int phy_version; @@ -317,15 +335,23 @@ u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. 
*/ u32 dirty_rx; u32 dirty_tx; - struct TxDesc *TxDescArray; /* Index of 256-alignment Tx Descriptor buffer */ - struct RxDesc *RxDescArray; /* Index of 256-alignment Rx Descriptor buffer */ + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ dma_addr_t TxPhyAddr; dma_addr_t RxPhyAddr; struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */ - struct sk_buff *Tx_skbuff[NUM_TX_DESC]; /* Index of Transmit data buffer */ + struct sk_buff *Tx_skbuff[NUM_TX_DESC]; /* Tx data buffers */ struct timer_list timer; - unsigned long phy_link_down_cnt; u16 cp_cmd; + u16 intr_mask; + int phy_auto_nego_reg; + int phy_1000_ctrl_reg; + + int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex); + void (*get_settings)(struct net_device *, struct ethtool_cmd *); + void (*phy_reset_enable)(void *); + unsigned int (*phy_reset_pending)(void *); + unsigned int (*link_ok)(void *); }; MODULE_AUTHOR("Realtek"); @@ -344,9 +370,14 @@ static void rtl8169_set_rx_mode(struct net_device *dev); static void rtl8169_tx_timeout(struct net_device *dev); static struct net_device_stats *rtl8169_get_stats(struct net_device *netdev); +#ifdef CONFIG_R8169_NAPI +static int rtl8169_poll(struct net_device *dev, int *budget); +#endif static const u16 rtl8169_intr_mask = - RxUnderrun | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK; + LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK; +static const u16 rtl8169_napi_event = + RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr; static const unsigned int rtl8169_rx_config = (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift); @@ -364,11 +395,9 @@ for (i = 2000; i > 0; i--) { // Check if the RTL8169 has completed writing to the specified MII register - if (!(RTL_R32(PHYAR) & 0x80000000)) { + if (!(RTL_R32(PHYAR) & 0x80000000)) break; - } else { - udelay(100); - } + udelay(100); } } @@ -390,18 +419,264 @@ return value; } +static unsigned int rtl8169_tbi_reset_pending(void *ioaddr) +{ + return RTL_R32(TBICSR) & TBIReset; +} + +static unsigned int rtl8169_xmii_reset_pending(void *ioaddr) +{ + return mdio_read(ioaddr, 0) & 0x8000; +} + +static unsigned int rtl8169_tbi_link_ok(void *ioaddr) +{ + return RTL_R32(TBICSR) & TBILinkOk; +} + +static unsigned int rtl8169_xmii_link_ok(void *ioaddr) +{ + return RTL_R8(PHYstatus) & LinkStatus; +} + +static void rtl8169_tbi_reset_enable(void *ioaddr) +{ + RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset); +} + +static void rtl8169_xmii_reset_enable(void *ioaddr) +{ + unsigned int val; + + val = (mdio_read(ioaddr, PHY_CTRL_REG) | 0x8000) & 0xffff; + mdio_write(ioaddr, PHY_CTRL_REG, val); +} + +static void rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, void *ioaddr) +{ + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + if (tp->link_ok(ioaddr)) { + netif_carrier_on(dev); + printk(KERN_INFO PFX "%s: link up\n", dev->name); + } else + netif_carrier_off(dev); + spin_unlock_irqrestore(&tp->lock, flags); +} + +static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex) +{ + struct { + u16 speed; + u8 duplex; + u8 autoneg; + u8 media; + } link_settings[] = { + { SPEED_10, DUPLEX_HALF, AUTONEG_DISABLE, _10_Half }, + { SPEED_10, DUPLEX_FULL, AUTONEG_DISABLE, _10_Full }, + { SPEED_100, DUPLEX_HALF, AUTONEG_DISABLE, _100_Half }, + { SPEED_100, DUPLEX_FULL, AUTONEG_DISABLE, _100_Full }, + { SPEED_1000, DUPLEX_FULL, AUTONEG_DISABLE, _1000_Full }, + /* Make TBI happy */ + { 
SPEED_1000, DUPLEX_FULL, AUTONEG_ENABLE, 0xff } + }, *p; + unsigned char option; + + option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff; + + if ((option != 0xff) && !idx) + printk(KERN_WARNING PFX "media option is deprecated.\n"); + + for (p = link_settings; p->media != 0xff; p++) { + if (p->media == option) + break; + } + *autoneg = p->autoneg; + *speed = p->speed; + *duplex = p->duplex; +} + static void rtl8169_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); strcpy(info->driver, RTL8169_DRIVER_NAME); strcpy(info->version, RTL8169_VERSION ); strcpy(info->bus_info, pci_name(tp->pci_dev)); } +static int rtl8169_set_speed_tbi(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void *ioaddr = tp->mmio_addr; + int ret = 0; + u32 reg; + + reg = RTL_R32(TBICSR); + if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && + (duplex == DUPLEX_FULL)) { + RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart)); + } else if (autoneg == AUTONEG_ENABLE) + RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart); + else { + printk(KERN_WARNING PFX + "%s: incorrect speed setting refused in TBI mode\n", + dev->name); + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int rtl8169_set_speed_xmii(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void *ioaddr = tp->mmio_addr; + int auto_nego, giga_ctrl; + + auto_nego = mdio_read(ioaddr, PHY_AUTO_NEGO_REG); + auto_nego &= ~(PHY_Cap_10_Half | PHY_Cap_10_Full | + PHY_Cap_100_Half | PHY_Cap_100_Full); + giga_ctrl = mdio_read(ioaddr, PHY_1000_CTRL_REG); + giga_ctrl &= ~(PHY_Cap_1000_Full | PHY_Cap_Null); + + if (autoneg == AUTONEG_ENABLE) { + auto_nego |= (PHY_Cap_10_Half | PHY_Cap_10_Full | + PHY_Cap_100_Half | PHY_Cap_100_Full); + giga_ctrl |= PHY_Cap_1000_Full; + } else { + if (speed == SPEED_10) + auto_nego |= PHY_Cap_10_Half | PHY_Cap_10_Full; + else if (speed == SPEED_100) + auto_nego |= PHY_Cap_100_Half | PHY_Cap_100_Full; + else if (speed == SPEED_1000) + giga_ctrl |= PHY_Cap_1000_Full; + + if (duplex == DUPLEX_HALF) + auto_nego &= ~(PHY_Cap_10_Full | PHY_Cap_100_Full); + } + + tp->phy_auto_nego_reg = auto_nego; + tp->phy_1000_ctrl_reg = giga_ctrl; + + mdio_write(ioaddr, PHY_AUTO_NEGO_REG, auto_nego); + mdio_write(ioaddr, PHY_1000_CTRL_REG, giga_ctrl); + mdio_write(ioaddr, PHY_CTRL_REG, PHY_Enable_Auto_Nego | + PHY_Restart_Auto_Nego); + return 0; +} + +static int rtl8169_set_speed(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = tp->set_speed(dev, autoneg, speed, duplex); + + if (netif_running(dev) && (tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full)) + mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); + + return ret; +} + +static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + unsigned long flags; + int ret; + + spin_lock_irqsave(&tp->lock, flags); + ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex); + spin_unlock_irqrestore(&tp->lock, flags); + + return ret; +} + +static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void *ioaddr = tp->mmio_addr; + u32 status; + + cmd->supported = + SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; + cmd->port = 
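rtl8169_link_option() above resolves the legacy media[] module parameter through a small table whose 0xff entry does double duty: it terminates the search and, because the loop leaves p pointing at it on a miss, it also supplies the default (autonegotiation, which keeps TBI-only boards happy). The same sentinel-as-default idiom in isolation (the media codes here are made up):

#include <stdio.h>

struct setting { int speed; int duplex; int autoneg; unsigned char media; };

static const struct setting table[] = {
        { 10,   0, 0, 0x01 },
        { 100,  1, 0, 0x08 },
        { 1000, 1, 1, 0xff },           /* sentinel doubles as default */
};

static const struct setting *lookup(unsigned char media)
{
        const struct setting *p;

        for (p = table; p->media != 0xff; p++)
                if (p->media == media)
                        break;
        return p;       /* on a miss, this is the sentinel entry */
}

int main(void)
{
        printf("unknown option -> %d Mb/s, autoneg=%d\n",
               lookup(0x42)->speed, lookup(0x42)->autoneg);
        return 0;
}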
PORT_FIBRE; + cmd->transceiver = XCVR_INTERNAL; + + status = RTL_R32(TBICSR); + cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0; + cmd->autoneg = !!(status & TBINwEnable); + + cmd->speed = SPEED_1000; + cmd->duplex = DUPLEX_FULL; /* Always set */ +} + +static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void *ioaddr = tp->mmio_addr; + u8 status; + + cmd->supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP; + + cmd->autoneg = 1; + cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg; + + if (tp->phy_auto_nego_reg & PHY_Cap_10_Half) + cmd->advertising |= ADVERTISED_10baseT_Half; + if (tp->phy_auto_nego_reg & PHY_Cap_10_Full) + cmd->advertising |= ADVERTISED_10baseT_Full; + if (tp->phy_auto_nego_reg & PHY_Cap_100_Half) + cmd->advertising |= ADVERTISED_100baseT_Half; + if (tp->phy_auto_nego_reg & PHY_Cap_100_Full) + cmd->advertising |= ADVERTISED_100baseT_Full; + if (tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full) + cmd->advertising |= ADVERTISED_1000baseT_Full; + + status = RTL_R8(PHYstatus); + + if (status & _1000bpsF) + cmd->speed = SPEED_1000; + else if (status & _100bps) + cmd->speed = SPEED_100; + else if (status & _10bps) + cmd->speed = SPEED_10; + + cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ? + DUPLEX_FULL : DUPLEX_HALF; +} + +static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + + tp->get_settings(dev, cmd); + + spin_unlock_irqrestore(&tp->lock, flags); + return 0; +} + + static struct ethtool_ops rtl8169_ethtool_ops = { .get_drvinfo = rtl8169_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_settings = rtl8169_get_settings, + .set_settings = rtl8169_set_settings, }; static void rtl8169_write_gmii_reg_bit(void *ioaddr, int reg, int bitnum, @@ -500,7 +775,7 @@ static void rtl8169_hw_phy_config(struct net_device *dev) { - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); void *ioaddr = tp->mmio_addr; struct { u16 regs[5]; /* Beware of bit-sign propagation */ @@ -566,61 +841,47 @@ mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0 } -static void rtl8169_hw_phy_reset(struct net_device *dev) -{ - struct rtl8169_private *tp = dev->priv; - void *ioaddr = tp->mmio_addr; - int i, val; - - printk(KERN_WARNING PFX "%s: Reset RTL8169s PHY\n", dev->name); - - val = (mdio_read(ioaddr, 0) | 0x8000) & 0xffff; - mdio_write(ioaddr, 0, val); - - for (i = 50; i >= 0; i--) { - if (!(mdio_read(ioaddr, 0) & 0x8000)) - break; - udelay(100); /* Gross */ - } - - if (i < 0) { - printk(KERN_WARNING PFX "%s: no PHY Reset ack. Giving up.\n", - dev->name); - } -} - static void rtl8169_phy_timer(unsigned long __opaque) { struct net_device *dev = (struct net_device *)__opaque; - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); struct timer_list *timer = &tp->timer; void *ioaddr = tp->mmio_addr; + unsigned long timeout = RTL8169_PHY_TIMEOUT; assert(tp->mac_version > RTL_GIGA_MAC_VER_B); assert(tp->phy_version < RTL_GIGA_PHY_VER_G); - if (RTL_R8(PHYstatus) & LinkStatus) - tp->phy_link_down_cnt = 0; - else { - tp->phy_link_down_cnt++; - if (tp->phy_link_down_cnt >= 12) { - int reg; - - // If link on 1000, perform phy reset. 
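rtl8169_gset_xmii() above reports the negotiated link by decoding the PHYstatus byte: the speed ladder checks the 1000-full bit first, and duplex is full whenever either the 1000-full or the full-duplex bit is set, since gigabit on this part is full duplex only. The decode logic on its own, with invented bit values (the real ones live in the driver's enums):

#include <stdio.h>

#define ST_1000F        0x10    /* hypothetical stand-ins for _1000bpsF, ... */
#define ST_100          0x08
#define ST_10           0x04
#define ST_FDUP         0x01

static void decode_phystatus(unsigned char st, int *speed, int *full)
{
        if (st & ST_1000F)
                *speed = 1000;
        else if (st & ST_100)
                *speed = 100;
        else if (st & ST_10)
                *speed = 10;
        /* gigabit implies full duplex */
        *full = (st & (ST_1000F | ST_FDUP)) != 0;
}

int main(void)
{
        int speed = 0, full = 0;

        decode_phystatus(ST_100 | ST_FDUP, &speed, &full);
        printf("%d Mb/s, %s duplex\n", speed, full ? "full" : "half");
        return 0;
}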
- reg = mdio_read(ioaddr, PHY_1000_CTRL_REG); - if (reg & PHY_Cap_1000_Full) - rtl8169_hw_phy_reset(dev); + if (!(tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full)) + return; - tp->phy_link_down_cnt = 0; - } + spin_lock_irq(&tp->lock); + + if (tp->phy_reset_pending(ioaddr)) { + /* + * A busy loop could burn quite a few cycles on a modern CPU. + * Let's delay the execution of the timer for a few ticks. + */ + timeout = HZ/10; + goto out_mod_timer; } - mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT); + if (tp->link_ok(ioaddr)) + goto out_unlock; + + printk(KERN_WARNING PFX "%s: PHY reset until link up\n", dev->name); + + tp->phy_reset_enable(ioaddr); + +out_mod_timer: + mod_timer(timer, jiffies + timeout); +out_unlock: + spin_unlock_irq(&tp->lock); } static inline void rtl8169_delete_timer(struct net_device *dev) { - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); struct timer_list *timer = &tp->timer; if ((tp->mac_version <= RTL_GIGA_MAC_VER_B) || @@ -628,21 +889,17 @@ return; del_timer_sync(timer); - - tp->phy_link_down_cnt = 0; } static inline void rtl8169_request_timer(struct net_device *dev) { - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); struct timer_list *timer = &tp->timer; if ((tp->mac_version <= RTL_GIGA_MAC_VER_B) || (tp->phy_version >= RTL_GIGA_PHY_VER_G)) return; - tp->phy_link_down_cnt = 0; - init_timer(timer); timer->expires = jiffies + RTL8169_PHY_TIMEOUT; timer->data = (unsigned long)(dev); @@ -681,7 +938,7 @@ // enable device (incl. PCI PM wakeup and hotplug setup) rc = pci_enable_device(pdev); if (rc) { - printk(KERN_ERR PFX "%s: unable to enable device\n", pdev->slot_name); + printk(KERN_ERR PFX "%s: enable failure\n", pdev->slot_name); goto err_out; } @@ -693,7 +950,8 @@ pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command); acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK; } else { - printk(KERN_ERR PFX "Cannot find PowerManagement capability, aborting.\n"); + printk(KERN_ERR PFX + "Cannot find PowerManagement capability, aborting.\n"); goto err_out_free_res; } @@ -718,7 +976,8 @@ rc = pci_request_regions(pdev, MODULENAME); if (rc) { - printk(KERN_ERR PFX "%s: Could not request regions.\n", pdev->slot_name); + printk(KERN_ERR PFX "%s: could not request regions.\n", + pdev->slot_name); goto err_out_disable; } @@ -800,8 +1059,9 @@ void *ioaddr = NULL; static int board_idx = -1; static int printed_version = 0; + u8 autoneg, duplex; + u16 speed; int i, rc; - int option = -1, Cap10_100 = 0, Cap1000 = 0; assert(pdev != NULL); assert(ent != NULL); @@ -822,6 +1082,22 @@ assert(dev != NULL); assert(tp != NULL); + if (RTL_R8(PHYstatus) & TBI_Enable) { + tp->set_speed = rtl8169_set_speed_tbi; + tp->get_settings = rtl8169_gset_tbi; + tp->phy_reset_enable = rtl8169_tbi_reset_enable; + tp->phy_reset_pending = rtl8169_tbi_reset_pending; + tp->link_ok = rtl8169_tbi_link_ok; + + tp->phy_1000_ctrl_reg = PHY_Cap_1000_Full; /* Implied by TBI */ + } else { + tp->set_speed = rtl8169_set_speed_xmii; + tp->get_settings = rtl8169_gset_xmii; + tp->phy_reset_enable = rtl8169_xmii_reset_enable; + tp->phy_reset_pending = rtl8169_xmii_reset_pending; + tp->link_ok = rtl8169_xmii_link_ok; + } + // Get MAC address.
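The reworked rtl8169_phy_timer() above is a small state machine: while a PHY reset is still pending it re-arms after HZ/10 instead of busy-waiting, once the link is up it lets the timer die, and otherwise it kicks another reset and re-checks after the full RTL8169_PHY_TIMEOUT (now 10*HZ). The same control flow reduced to a pure, runnable function:

#include <stdio.h>

#define HZ              1000
#define PHY_TIMEOUT     (10 * HZ)

/* Next rearm delay in ticks; 0 means "stop", mirroring the timer above. */
static unsigned long phy_timer_step(int reset_pending, int link_ok)
{
        if (reset_pending)
                return HZ / 10;         /* check again soon, don't spin */
        if (link_ok)
                return 0;               /* link is back, nothing to do */
        return PHY_TIMEOUT;             /* trigger a reset, retry later */
}

int main(void)
{
        printf("pending -> %lu\n", phy_timer_step(1, 0));       /* 100 */
        printf("link up -> %lu\n", phy_timer_step(0, 1));       /* 0 */
        printf("retry   -> %lu\n", phy_timer_step(0, 0));       /* 10000 */
        return 0;
}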
FIXME: read EEPROM for (i = 0; i < MAC_ADDR_LEN; i++) dev->dev_addr[i] = RTL_R8(MAC0 + i); @@ -836,9 +1112,12 @@ dev->watchdog_timeo = RTL8169_TX_TIMEOUT; dev->irq = pdev->irq; dev->base_addr = (unsigned long) ioaddr; -// dev->do_ioctl = mii_ioctl; - - tp = dev->priv; // private data // +#ifdef CONFIG_R8169_NAPI + dev->poll = rtl8169_poll; + dev->weight = R8169_NAPI_WEIGHT; + printk(KERN_INFO PFX "NAPI enabled\n"); +#endif + tp->intr_mask = 0xffff; tp->pci_dev = pdev; tp->mmio_addr = ioaddr; @@ -885,95 +1164,12 @@ mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0 } - // if TBI is not endbled - if (!(RTL_R8(PHYstatus) & TBI_Enable)) { - int val = mdio_read(ioaddr, PHY_AUTO_NEGO_REG); - - option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx]; - // Force RTL8169 in 10/100/1000 Full/Half mode. - if (option > 0) { - printk(KERN_INFO "%s: Force-mode Enabled.\n", - dev->name); - Cap10_100 = 0, Cap1000 = 0; - switch (option) { - case _10_Half: - Cap10_100 = PHY_Cap_10_Half_Or_Less; - Cap1000 = PHY_Cap_Null; - break; - case _10_Full: - Cap10_100 = PHY_Cap_10_Full_Or_Less; - Cap1000 = PHY_Cap_Null; - break; - case _100_Half: - Cap10_100 = PHY_Cap_100_Half_Or_Less; - Cap1000 = PHY_Cap_Null; - break; - case _100_Full: - Cap10_100 = PHY_Cap_100_Full_Or_Less; - Cap1000 = PHY_Cap_Null; - break; - case _1000_Full: - Cap10_100 = PHY_Cap_100_Full_Or_Less; - Cap1000 = PHY_Cap_1000_Full; - break; - default: - break; - } - mdio_write(ioaddr, PHY_AUTO_NEGO_REG, Cap10_100 | (val & 0x1F)); //leave PHY_AUTO_NEGO_REG bit4:0 unchanged - mdio_write(ioaddr, PHY_1000_CTRL_REG, Cap1000); - } else { - printk(KERN_INFO "%s: Auto-negotiation Enabled.\n", - dev->name); - - // enable 10/100 Full/Half Mode, leave PHY_AUTO_NEGO_REG bit4:0 unchanged - mdio_write(ioaddr, PHY_AUTO_NEGO_REG, - PHY_Cap_100_Full_Or_Less | (val & 0x1f)); - - // enable 1000 Full Mode - mdio_write(ioaddr, PHY_1000_CTRL_REG, - PHY_Cap_1000_Full); - - } - - // Enable auto-negotiation and restart auto-nigotiation - mdio_write(ioaddr, PHY_CTRL_REG, - PHY_Enable_Auto_Nego | PHY_Restart_Auto_Nego); - udelay(100); - - // wait for auto-negotiation process - for (i = 10000; i > 0; i--) { - //check if auto-negotiation complete - if (mdio_read(ioaddr, PHY_STAT_REG) & - PHY_Auto_Neco_Comp) { - udelay(100); - option = RTL_R8(PHYstatus); - if (option & _1000bpsF) { - printk(KERN_INFO - "%s: 1000Mbps Full-duplex operation.\n", - dev->name); - } else { - printk(KERN_INFO - "%s: %sMbps %s-duplex operation.\n", - dev->name, - (option & _100bps) ? "100" : - "10", - (option & FullDup) ? "Full" : - "Half"); - } - break; - } else { - udelay(100); - } - } // end for-loop to wait for auto-negotiation process - - } else { - udelay(100); - printk(KERN_INFO - "%s: 1000Mbps Full-duplex operation, TBI Link %s!\n", - dev->name, - (RTL_R32(TBICSR) & TBILinkOK) ? 
"OK" : "Failed"); + rtl8169_link_option(board_idx, &autoneg, &speed, &duplex); - } + rtl8169_set_speed(dev, autoneg, speed, duplex); + + if (RTL_R8(PHYstatus) & TBI_Enable) + printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name); return 0; } @@ -982,7 +1178,7 @@ rtl8169_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); assert(dev != NULL); assert(tp != NULL); @@ -1001,7 +1197,7 @@ static int rtl8169_suspend(struct pci_dev *pdev, u32 state) { struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); void *ioaddr = tp->mmio_addr; unsigned long flags; @@ -1042,7 +1238,7 @@ static int rtl8169_open(struct net_device *dev) { - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); struct pci_dev *pdev = tp->pci_dev; int retval; @@ -1074,6 +1270,8 @@ rtl8169_hw_start(dev); rtl8169_request_timer(dev); + + rtl8169_check_link_status(dev, tp, tp->mmio_addr); out: return retval; @@ -1091,7 +1289,7 @@ static void rtl8169_hw_start(struct net_device *dev) { - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); void *ioaddr = tp->mmio_addr; u32 i; @@ -1102,8 +1300,7 @@ for (i = 1000; i > 0; i--) { if ((RTL_R8(ChipCmd) & CmdReset) == 0) break; - else - udelay(10); + udelay(10); } RTL_W8(Cfg9346, Cfg9346_Unlock); @@ -1114,8 +1311,8 @@ RTL_W16(RxMaxSize, RxPacketMaxSize); // Set Rx Config register - i = rtl8169_rx_config | (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset]. - RxConfigMask); + i = rtl8169_rx_config | + (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask); RTL_W32(RxConfig, i); /* Set DMA burst size and Interframe Gap Time */ @@ -1126,7 +1323,8 @@ RTL_W16(CPlusCmd, tp->cp_cmd); if (tp->mac_version == RTL_GIGA_MAC_VER_D) { - dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0: bit-3 and bit-14 MUST be 1\n"); + dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. 
" + "Bit-3 and bit-14 MUST be 1\n"); tp->cp_cmd |= (1 << 14) | PCIMulRW; RTL_W16(CPlusCmd, tp->cp_cmd); } @@ -1151,7 +1349,6 @@ RTL_W16(IntrMask, rtl8169_intr_mask); netif_start_queue(dev); - } static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) @@ -1248,7 +1445,7 @@ static int rtl8169_init_ring(struct net_device *dev) { - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); tp->cur_rx = tp->dirty_rx = 0; tp->cur_tx = tp->dirty_tx = 0; @@ -1302,10 +1499,11 @@ static void rtl8169_tx_timeout(struct net_device *dev) { - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); void *ioaddr = tp->mmio_addr; u8 tmp8; + printk(KERN_INFO "%s: TX Timeout\n", dev->name); /* disable Tx, if not already */ tmp8 = RTL_R8(ChipCmd); if (tmp8 & CmdTxEnb) @@ -1328,9 +1526,9 @@ static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev) { - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); void *ioaddr = tp->mmio_addr; - int entry = tp->cur_tx % NUM_TX_DESC; + unsigned int entry = tp->cur_tx % NUM_TX_DESC; u32 len = skb->len; if (unlikely(skb->len < ETH_ZLEN)) { @@ -1340,10 +1538,9 @@ len = ETH_ZLEN; } - spin_lock_irq(&tp->lock); - if (!(le32_to_cpu(tp->TxDescArray[entry].status) & OWNbit)) { dma_addr_t mapping; + u32 status; mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE); @@ -1351,24 +1548,30 @@ tp->Tx_skbuff[entry] = skb; tp->TxDescArray[entry].addr = cpu_to_le64(mapping); - tp->TxDescArray[entry].status = cpu_to_le32(OWNbit | FSbit | - LSbit | len | (EORbit * !((entry + 1) % NUM_TX_DESC))); + /* anti gcc 2.95.3 bugware */ + status = OWNbit | FSbit | LSbit | len | + (EORbit * !((entry + 1) % NUM_TX_DESC)); + tp->TxDescArray[entry].status = cpu_to_le32(status); RTL_W8(TxPoll, 0x40); //set polling bit dev->trans_start = jiffies; tp->cur_tx++; + smp_wmb(); } else goto err_drop; - if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) { + u32 dirty = tp->dirty_tx; + netif_stop_queue(dev); + smp_rmb(); + if (dirty != tp->dirty_tx) + netif_wake_queue(dev); } -out: - spin_unlock_irq(&tp->lock); +out: return 0; err_drop: @@ -1382,17 +1585,18 @@ rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp, void *ioaddr) { - unsigned long dirty_tx, tx_left; + unsigned int dirty_tx, tx_left; assert(dev != NULL); assert(tp != NULL); assert(ioaddr != NULL); dirty_tx = tp->dirty_tx; + smp_rmb(); tx_left = tp->cur_tx - dirty_tx; while (tx_left > 0) { - int entry = dirty_tx % NUM_TX_DESC; + unsigned int entry = dirty_tx % NUM_TX_DESC; struct sk_buff *skb = tp->Tx_skbuff[entry]; u32 status; @@ -1415,6 +1619,7 @@ if (tp->dirty_tx != dirty_tx) { tp->dirty_tx = dirty_tx; + smp_wmb(); if (netif_queue_stopped(dev)) netif_wake_queue(dev); } @@ -1442,11 +1647,11 @@ return ret; } -static void +static int rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp, void *ioaddr) { - unsigned long cur_rx, rx_left; + unsigned int cur_rx, rx_left, count; int delta; assert(dev != NULL); @@ -1455,9 +1660,10 @@ cur_rx = tp->cur_rx; rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; + rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota); while (rx_left > 0) { - int entry = cur_rx % NUM_RX_DESC; + unsigned int entry = cur_rx % NUM_RX_DESC; u32 status; rmb(); @@ -1494,7 +1700,7 @@ skb_put(skb, pkt_size); skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); + rtl8169_rx_skb(skb); dev->last_rx = jiffies; tp->stats.rx_bytes += pkt_size; @@ -1505,13 +1711,15 @@ 
rx_left--; } + count = cur_rx - tp->cur_rx; tp->cur_rx = cur_rx; delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); - if (delta > 0) - tp->dirty_rx += delta; - else if (delta < 0) + if (delta < 0) { printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name); + delta = 0; + } + tp->dirty_rx += delta; /* * FIXME: until there is periodic timer to try and refill the ring, @@ -1522,6 +1730,8 @@ */ if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name); + + return count; } /* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */ @@ -1529,7 +1739,7 @@ rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs) { struct net_device *dev = (struct net_device *) dev_instance; - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); int boguscnt = max_interrupt_work; void *ioaddr = tp->mmio_addr; int status = 0; @@ -1543,26 +1753,37 @@ break; handled = 1; -/* - if (status & RxUnderrun) - link_changed = RTL_R16 (CSCR) & CSCR_LinkChangeBit; -*/ + + status &= tp->intr_mask; RTL_W16(IntrStatus, (status & RxFIFOOver) ? (status | RxOverflow) : status); if (!(status & rtl8169_intr_mask)) break; + if (status & LinkChg) + rtl8169_check_link_status(dev, tp, ioaddr); + +#ifdef CONFIG_R8169_NAPI + RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event); + tp->intr_mask = ~rtl8169_napi_event; + + if (likely(netif_rx_schedule_prep(dev))) + __netif_rx_schedule(dev); + else { + printk(KERN_INFO "%s: interrupt %x taken in poll\n", + dev->name, status); + } + break; +#else // Rx interrupt - if (status & (RxOK | RxUnderrun | RxOverflow | RxFIFOOver)) { + if (status & (RxOK | RxOverflow | RxFIFOOver)) { rtl8169_rx_interrupt(dev, tp, ioaddr); } // Tx interrupt - if (status & (TxOK | TxErr)) { - spin_lock(&tp->lock); + if (status & (TxOK | TxErr)) rtl8169_tx_interrupt(dev, tp, ioaddr); - spin_unlock(&tp->lock); - } +#endif boguscnt--; } while (boguscnt > 0); @@ -1576,10 +1797,40 @@ return IRQ_RETVAL(handled); } +#ifdef CONFIG_R8169_NAPI +static int rtl8169_poll(struct net_device *dev, int *budget) +{ + unsigned int work_done, work_to_do = min(*budget, dev->quota); + struct rtl8169_private *tp = netdev_priv(dev); + void *ioaddr = tp->mmio_addr; + + work_done = rtl8169_rx_interrupt(dev, tp, ioaddr); + rtl8169_tx_interrupt(dev, tp, ioaddr); + + *budget -= work_done; + dev->quota -= work_done; + + if ((work_done < work_to_do) || !netif_running(dev)) { + netif_rx_complete(dev); + tp->intr_mask = 0xffff; + /* + * 20040426: the barrier is not strictly required but the + * behavior of the irq handler could be less predictable + * without it. Btw, the lack of flush for the posted pci + * write is safe - FR + */ + smp_wmb(); + RTL_W16(IntrMask, rtl8169_intr_mask); + } + + return (work_done >= work_to_do); +} +#endif + static int rtl8169_close(struct net_device *dev) { - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); struct pci_dev *pdev = tp->pci_dev; void *ioaddr = tp->mmio_addr; @@ -1621,7 +1872,7 @@ static void rtl8169_set_rx_mode(struct net_device *dev) { - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); void *ioaddr = tp->mmio_addr; unsigned long flags; u32 mc_filter[2]; /* Multicast hash filter */ @@ -1655,10 +1906,8 @@ spin_lock_irqsave(&tp->lock, flags); - tmp = - rtl8169_rx_config | rx_mode | (RTL_R32(RxConfig) & - rtl_chip_info[tp->chipset]. 
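The smp_wmb()/smp_rmb() pairs added to rtl8169_start_xmit() and rtl8169_tx_interrupt() above (in place of the old spin_lock_irq around the whole xmit path) implement the standard lockless stop/wake handshake: the producer publishes cur_tx, and when it stops the queue it re-reads dirty_tx afterwards, so a completion that raced in between is noticed and the queue is woken instead of stalling. A rough user-space rendition, using C11 fences as approximate stand-ins for the kernel barriers (single producer and single consumer assumed):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_TX_DESC 64

static atomic_uint cur_tx, dirty_tx;
static atomic_bool stopped;

/* Producer: publish the descriptor, stop only when the ring is full,
 * then re-check dirty_tx so a racing reclaim cannot strand the queue. */
static void xmit_one(void)
{
        atomic_fetch_add(&cur_tx, 1);
        atomic_thread_fence(memory_order_release);              /* ~smp_wmb() */

        if (atomic_load(&cur_tx) - atomic_load(&dirty_tx) == NUM_TX_DESC) {
                unsigned int dirty = atomic_load(&dirty_tx);

                atomic_store(&stopped, true);
                atomic_thread_fence(memory_order_acquire);      /* ~smp_rmb() */
                if (dirty != atomic_load(&dirty_tx))
                        atomic_store(&stopped, false);  /* lost race: wake */
        }
}

int main(void)
{
        int i;

        for (i = 0; i < NUM_TX_DESC; i++)
                xmit_one();
        printf("queue %s\n", atomic_load(&stopped) ? "stopped" : "running");
        return 0;
}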
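rtl8169_poll() above follows the 2.6 netdev polling contract: do at most min(*budget, dev->quota) units of receive work, charge both counters, and return nonzero only while the device should stay on the poll list; on early completion it re-enables the interrupt sources it had masked. The budget bookkeeping in a runnable toy form (the quota value is borrowed from R8169_NAPI_WEIGHT):

#include <stdio.h>

#define MIN(a, b)       ((a) < (b) ? (a) : (b))

/* Consume up to min(*budget, *quota) of `backlog`; nonzero return means
 * "poll me again", zero means "done, unmask interrupts". */
static int poll(int *budget, int *quota, int backlog)
{
        int work_to_do = MIN(*budget, *quota);
        int work_done = MIN(backlog, work_to_do);

        *budget -= work_done;
        *quota -= work_done;
        return work_done >= work_to_do;
}

int main(void)
{
        int budget = 1000, backlog = 150;

        while (backlog > 0) {
                int quota = 64, before = budget;
                int again = poll(&budget, &quota, backlog);

                backlog -= before - budget;
                printf("done=%d poll_again=%d\n", before - budget, again);
        }
        return 0;
}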
- RxConfigMask); + tmp = rtl8169_rx_config | rx_mode | + (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask); RTL_W32(RxConfig, tmp); RTL_W32(MAR0 + 0, mc_filter[0]); @@ -1675,7 +1924,7 @@ */ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev) { - struct rtl8169_private *tp = dev->priv; + struct rtl8169_private *tp = netdev_priv(dev); void *ioaddr = tp->mmio_addr; unsigned long flags; diff -Nru a/drivers/net/sis900.c b/drivers/net/sis900.c --- a/drivers/net/sis900.c 2004-07-13 12:46:19 -07:00 +++ b/drivers/net/sis900.c 2004-07-13 12:46:19 -07:00 @@ -116,6 +116,7 @@ #define HOME 0x0001 #define LAN 0x0002 #define MIX 0x0003 +#define UNKNOWN 0x0 } mii_chip_table[] = { { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN }, { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN }, @@ -577,9 +578,11 @@ break; } - if( !mii_chip_table[i].phy_id1 ) + if( !mii_chip_table[i].phy_id1 ) { printk(KERN_INFO "%s: Unknown PHY transceiver found at address %d.\n", - net_dev->name, phy_addr); + net_dev->name, phy_addr); + mii_phy->phy_types = UNKNOWN; + } } if (sis_priv->mii == NULL) { @@ -644,15 +647,15 @@ static u16 sis900_default_phy(struct net_device * net_dev) { struct sis900_private * sis_priv = net_dev->priv; - struct mii_phy *phy = NULL, *phy_home = NULL, *default_phy = NULL; + struct mii_phy *phy = NULL, *phy_home = NULL, *default_phy = NULL, *phy_lan = NULL; u16 status; for( phy=sis_priv->first_mii; phy; phy=phy->next ){ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS); status = mdio_read(net_dev, phy->phy_addr, MII_STATUS); - /* Link ON & Not select deafalut PHY */ - if ( (status & MII_STAT_LINK) && !(default_phy) ) + /* Link ON & Not select default PHY & not ghost PHY */ + if ( (status & MII_STAT_LINK) && !default_phy && (phy->phy_types != UNKNOWN) ) default_phy = phy; else{ status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL); @@ -660,12 +663,16 @@ status | MII_CNTL_AUTO | MII_CNTL_ISOLATE); if( phy->phy_types == HOME ) phy_home = phy; + else if (phy->phy_types == LAN) + phy_lan = phy; } } - if( (!default_phy) && phy_home ) + if( !default_phy && phy_home ) default_phy = phy_home; - else if(!default_phy) + else if( !default_phy && phy_lan ) + default_phy = phy_lan; + else if ( !default_phy ) default_phy = sis_priv->first_mii; if( sis_priv->mii != default_phy ){ diff -Nru a/drivers/net/sk98lin/h/skdrv2nd.h b/drivers/net/sk98lin/h/skdrv2nd.h --- a/drivers/net/sk98lin/h/skdrv2nd.h 2004-07-13 12:46:19 -07:00 +++ b/drivers/net/sk98lin/h/skdrv2nd.h 2004-07-13 12:46:19 -07:00 @@ -53,60 +53,6 @@ #include "h/skrlmt.h" #include "h/skgedrv.h" -#define SK_PCI_ISCOMPLIANT(result, pdev) { \ - result = SK_FALSE; /* default */ \ - /* 3Com (0x10b7) */ \ - if (pdev->vendor == 0x10b7) { \ - /* Gigabit Ethernet Adapter (0x1700) */ \ - if ((pdev->device == 0x1700) || \ - (pdev->device == 0x80eb)) { \ - result = SK_TRUE; \ - } \ - /* SysKonnect (0x1148) */ \ - } else if (pdev->vendor == 0x1148) { \ - /* SK-98xx Gigabit Ethernet Server Adapter (0x4300) */ \ - /* SK-98xx V2.0 Gigabit Ethernet Adapter (0x4320) */ \ - if ((pdev->device == 0x4300) || \ - (pdev->device == 0x4320)) { \ - result = SK_TRUE; \ - } \ - /* D-Link (0x1186) */ \ - } else if (pdev->vendor == 0x1186) { \ - /* Gigabit Ethernet Adapter (0x4c00) */ \ - if ((pdev->device == 0x4c00)) { \ - result = SK_TRUE; \ - } \ - /* Marvell (0x11ab) */ \ - } else if (pdev->vendor == 0x11ab) { \ - /* Gigabit Ethernet Adapter (0x4320) */ \ - /* Gigabit Ethernet Adapter (0x4360) */ \ - /* Gigabit Ethernet Adapter (0x4361) */ \ - 
/* Belkin (0x5005) */ \ - if ((pdev->device == 0x4320) || \ - (pdev->device == 0x4360) || \ - (pdev->device == 0x4361) || \ - (pdev->device == 0x5005)) { \ - result = SK_TRUE; \ - } \ - /* CNet (0x1371) */ \ - } else if (pdev->vendor == 0x1371) { \ - /* GigaCard Network Adapter (0x434e) */ \ - if ((pdev->device == 0x434e)) { \ - result = SK_TRUE; \ - } \ - /* Linksys (0x1737) */ \ - } else if (pdev->vendor == 0x1737) { \ - /* Gigabit Network Adapter (0x1032) */ \ - /* Gigabit Network Adapter (0x1064) */ \ - if ((pdev->device == 0x1032) || \ - (pdev->device == 0x1064)) { \ - result = SK_TRUE; \ - } \ - } else { \ - result = SK_FALSE; \ - } \ -} - extern SK_MBUF *SkDrvAllocRlmtMbuf(SK_AC*, SK_IOC, unsigned); extern void SkDrvFreeRlmtMbuf(SK_AC*, SK_IOC, SK_MBUF*); diff -Nru a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c --- a/drivers/net/sk98lin/skge.c 2004-07-13 12:46:19 -07:00 +++ b/drivers/net/sk98lin/skge.c 2004-07-13 12:46:19 -07:00 @@ -239,7 +239,7 @@ #ifdef CONFIG_PROC_FS static const char SK_Root_Dir_entry[] = "sk98lin"; -static struct proc_dir_entry *pSkRootDir = NULL; +static struct proc_dir_entry *pSkRootDir; extern struct file_operations sk_proc_fops; #endif @@ -255,303 +255,13 @@ #endif /* global variables *********************************************************/ -static const char *BootString = BOOT_STRING; struct SK_NET_DEVICE *SkGeRootDev = NULL; -static int probed __initdata = 0; static SK_BOOL DoPrintInterfaceChange = SK_TRUE; /* local variables **********************************************************/ static uintptr_t TxQueueAddr[SK_MAX_MACS][2] = {{0x680, 0x600},{0x780, 0x700}}; static uintptr_t RxQueueAddr[SK_MAX_MACS] = {0x400, 0x480}; - -#ifdef CONFIG_PROC_FS -static struct proc_dir_entry *pSkRootDir; -#endif - - - -/***************************************************************************** - * - * skge_probe - find all SK-98xx adapters - * - * Description: - * This function scans the PCI bus for SK-98xx adapters. Resources for - * each adapter are allocated and the adapter is brought into Init 1 - * state. - * - * Returns: - * 0, if everything is ok - * !=0, on error - */ -static int __init skge_probe (void) -{ - int boards_found = 0; - int vendor_flag = SK_FALSE; - SK_AC *pAC; - DEV_NET *pNet = NULL; - struct pci_dev *pdev = NULL; - struct SK_NET_DEVICE *dev = NULL; - SK_BOOL DeviceFound = SK_FALSE; - SK_BOOL BootStringCount = SK_FALSE; - int retval; -#ifdef CONFIG_PROC_FS - struct proc_dir_entry *pProcFile; -#endif - - if (probed) - return -ENODEV; - probed++; - - - while((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) { - - if (pci_enable_device(pdev)) { - continue; - } - dev = NULL; - pNet = NULL; - - /* Don't handle Yukon2 cards at the moment */ - /* 12-feb-2004 ---- mlindner@syskonnect.de */ - if (pdev->vendor == 0x11ab) { - if ( (pdev->device == 0x4360) || (pdev->device == 0x4361) ) - continue; - } - - SK_PCI_ISCOMPLIANT(vendor_flag, pdev); - if (!vendor_flag) - continue; - - /* Configure DMA attributes. 
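The SK_PCI_ISCOMPLIANT() if-chain deleted here hard-codes the supported vendor/device pairs in driver logic; the rest of this patch replaces it with the skge_pci_tbl device table further down, which the PCI core matches against automatically. The table-matching idea in self-contained form, using a subset of the IDs listed above:

#include <stdio.h>

struct id { unsigned short vendor, device; };

static const struct id skge_ids[] = {
        { 0x10b7, 0x1700 }, { 0x10b7, 0x80eb },  /* 3Com */
        { 0x1148, 0x4300 }, { 0x1148, 0x4320 },  /* SysKonnect */
        { 0x11ab, 0x4320 }, { 0x11ab, 0x5005 },  /* Marvell, Belkin */
        { 0, 0 }                                 /* terminator */
};

static int is_supported(unsigned short vendor, unsigned short device)
{
        const struct id *p;

        for (p = skge_ids; p->vendor; p++)
                if (p->vendor == vendor && p->device == device)
                        return 1;
        return 0;
}

int main(void)
{
        printf("%d %d\n", is_supported(0x1148, 0x4300),
               is_supported(0x8086, 0x1229));   /* 1 0 */
        return 0;
}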
*/ - if (pci_set_dma_mask(pdev, (u64) 0xffffffffffffffffULL) && - pci_set_dma_mask(pdev, (u64) 0xffffffff)) - continue; - - - if ((dev = alloc_etherdev(sizeof(DEV_NET))) == NULL) { - printk(KERN_ERR "Unable to allocate etherdev " - "structure!\n"); - break; - } - - pNet = dev->priv; - pNet->pAC = kmalloc(sizeof(SK_AC), GFP_KERNEL); - if (pNet->pAC == NULL){ - free_netdev(dev); - printk(KERN_ERR "Unable to allocate adapter " - "structure!\n"); - break; - } - - /* Print message */ - if (!BootStringCount) { - /* set display flag to TRUE so that */ - /* we only display this string ONCE */ - BootStringCount = SK_TRUE; - printk("%s\n", BootString); - } - - memset(pNet->pAC, 0, sizeof(SK_AC)); - pAC = pNet->pAC; - pAC->PciDev = pdev; - pAC->PciDevId = pdev->device; - pAC->dev[0] = dev; - pAC->dev[1] = dev; - sprintf(pAC->Name, "SysKonnect SK-98xx"); - pAC->CheckQueue = SK_FALSE; - - pNet->Mtu = 1500; - pNet->Up = 0; - dev->irq = pdev->irq; - retval = SkGeInitPCI(pAC); - if (retval) { - printk("SKGE: PCI setup failed: %i\n", retval); - free_netdev(dev); - continue; - } - - SET_MODULE_OWNER(dev); - dev->open = &SkGeOpen; - dev->stop = &SkGeClose; - dev->hard_start_xmit = &SkGeXmit; - dev->get_stats = &SkGeStats; - dev->last_stats = &SkGeStats; - dev->set_multicast_list = &SkGeSetRxMode; - dev->set_mac_address = &SkGeSetMacAddr; - dev->do_ioctl = &SkGeIoctl; - dev->change_mtu = &SkGeChangeMtu; - dev->flags &= ~IFF_RUNNING; - SET_NETDEV_DEV(dev, &pdev->dev); - -#ifdef SK_ZEROCOPY -#ifdef USE_SK_TX_CHECKSUM - - if (pAC->ChipsetType) { - /* Use only if yukon hardware */ - /* SK and ZEROCOPY - fly baby... */ - dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; - } -#endif -#endif - - pAC->Index = boards_found; - - if (SkGeBoardInit(dev, pAC)) { - free_netdev(dev); - continue; - } - - /* Register net device */ - if (register_netdev(dev)) { - printk(KERN_ERR "SKGE: Could not register device.\n"); - FreeResources(dev); - free_netdev(dev); - continue; - } - - /* Print adapter specific string from vpd */ - ProductStr(pAC); - printk("%s: %s\n", dev->name, pAC->DeviceStr); - - /* Print configuration settings */ - printk(" PrefPort:%c RlmtMode:%s\n", - 'A' + pAC->Rlmt.Net[0].Port[pAC->Rlmt.Net[0].PrefPort]->PortNumber, - (pAC->RlmtMode==0) ? "Check Link State" : - ((pAC->RlmtMode==1) ? "Check Link State" : - ((pAC->RlmtMode==3) ? "Check Local Port" : - ((pAC->RlmtMode==7) ? "Check Segmentation" : - ((pAC->RlmtMode==17) ? "Dual Check Link State" :"Error"))))); - - SkGeYellowLED(pAC, pAC->IoBase, 1); - - - memcpy((caddr_t) &dev->dev_addr, - (caddr_t) &pAC->Addr.Net[0].CurrentMacAddress, 6); - - /* First adapter... 
Create proc and print message */ -#ifdef CONFIG_PROC_FS - if (!DeviceFound) { - DeviceFound = SK_TRUE; - SK_MEMCPY(&SK_Root_Dir_entry, BootString, - sizeof(SK_Root_Dir_entry) - 1); - - /*Create proc (directory)*/ - if(!pSkRootDir) { - pSkRootDir = proc_mkdir(SK_Root_Dir_entry, proc_net); - if (!pSkRootDir) { - printk(KERN_WARNING "%s: Unable to create /proc/net/%s", - dev->name, SK_Root_Dir_entry); - } else { - pSkRootDir->owner = THIS_MODULE; - } - } - } - - /* Create proc file */ - if (pSkRootDir && - (pProcFile = create_proc_entry(dev->name, S_IRUGO, - pSkRootDir))) { - pProcFile->proc_fops = &sk_proc_fops; - pProcFile->data = dev; - } - -#endif - - pNet->PortNr = 0; - pNet->NetNr = 0; - - boards_found++; - - /* More then one port found */ - if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { - if ((dev = alloc_etherdev(sizeof(DEV_NET))) == 0) { - printk(KERN_ERR "Unable to allocate etherdev " - "structure!\n"); - break; - } - - pAC->dev[1] = dev; - pNet = dev->priv; - pNet->PortNr = 1; - pNet->NetNr = 1; - pNet->pAC = pAC; - pNet->Mtu = 1500; - pNet->Up = 0; - - dev->open = &SkGeOpen; - dev->stop = &SkGeClose; - dev->hard_start_xmit = &SkGeXmit; - dev->get_stats = &SkGeStats; - dev->last_stats = &SkGeStats; - dev->set_multicast_list = &SkGeSetRxMode; - dev->set_mac_address = &SkGeSetMacAddr; - dev->do_ioctl = &SkGeIoctl; - dev->change_mtu = &SkGeChangeMtu; - dev->flags &= ~IFF_RUNNING; - -#ifdef SK_ZEROCOPY -#ifdef USE_SK_TX_CHECKSUM - if (pAC->ChipsetType) { - /* SG and ZEROCOPY - fly baby... */ - dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; - } -#endif -#endif - - if (register_netdev(dev)) { - printk(KERN_ERR "SKGE: Could not register device.\n"); - free_netdev(dev); - pAC->dev[1] = pAC->dev[0]; - } else { -#ifdef CONFIG_PROC_FS - if (pSkRootDir - && (pProcFile = create_proc_entry(dev->name, - S_IRUGO, pSkRootDir))) { - pProcFile->proc_fops = &sk_proc_fops; - pProcFile->data = dev; - } -#endif - - memcpy((caddr_t) &dev->dev_addr, - (caddr_t) &pAC->Addr.Net[1].CurrentMacAddress, 6); - - printk("%s: %s\n", dev->name, pAC->DeviceStr); - printk(" PrefPort:B RlmtMode:Dual Check Link State\n"); - } - } - - /* Save the hardware revision */ - pAC->HWRevision = (((pAC->GIni.GIPciHwRev >> 4) & 0x0F)*10) + - (pAC->GIni.GIPciHwRev & 0x0F); - - /* Set driver globals */ - pAC->Pnmi.pDriverFileName = DRIVER_FILE_NAME; - pAC->Pnmi.pDriverReleaseDate = DRIVER_REL_DATE; - - SK_MEMSET(&(pAC->PnmiBackup), 0, sizeof(SK_PNMI_STRUCT_DATA)); - SK_MEMCPY(&(pAC->PnmiBackup), &(pAC->PnmiStruct), - sizeof(SK_PNMI_STRUCT_DATA)); - - /* - * This is bollocks, but we need to tell the net-init - * code that it shall go for the next device. - */ -#ifndef MODULE - dev->base_addr = 0; -#endif - } - - /* - * If we're at this point we're going through skge_probe() for - * the first time. Return success (0) if we've initialized 1 - * or more boards. Otherwise, return failure (-ENODEV). 
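skge_probe(), being removed in this hunk, walks the bus itself with pci_find_class() and keeps global state (`probed`, SkGeRootDev) to cope with being called exactly once; the hotplug model that replaces it registers a struct pci_driver, after which the PCI core invokes the probe routine once per matching device and the remove routine on unplug or unload. A skeleton of that conversion with placeholder `toy_*` names, assuming the 2.6-era API (pci_module_init and friends) used by the new skge code below:

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id toy_pci_tbl[] = {
        { PCI_VENDOR_ID_SYSKONNECT, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, toy_pci_tbl);

static int __devinit toy_probe_one(struct pci_dev *pdev,
                                   const struct pci_device_id *ent)
{
        int rc = pci_enable_device(pdev);
        if (rc)
                return rc;
        /* allocate the netdev, map BARs, register_netdev(), ... */
        return 0;
}

static void __devexit toy_remove_one(struct pci_dev *pdev)
{
        /* unregister_netdev(), unmap, free, ... */
        pci_disable_device(pdev);
}

static struct pci_driver toy_driver = {
        .name           = "toy",
        .id_table       = toy_pci_tbl,
        .probe          = toy_probe_one,
        .remove         = __devexit_p(toy_remove_one),
};

static int __init toy_init(void)
{
        return pci_module_init(&toy_driver);
}

static void __exit toy_exit(void)
{
        pci_unregister_driver(&toy_driver);
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");

This is the same shape as the skge_probe_one()/skge_remove_one()/skge_init() trio introduced later in the patch.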
- */ - - return boards_found; -} /* skge_probe */ - - /***************************************************************************** * * SkGeInitPCI - Init the PCI resources @@ -666,9 +376,6 @@ MODULE_PARM(ConType, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(PrefPort, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(RlmtMode, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); -/* not used, just there because every driver should have them: */ -MODULE_PARM(options, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "i"); -MODULE_PARM(debug, "i"); /* used for interrupt moderation */ MODULE_PARM(IntsPerSec, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "i"); MODULE_PARM(Moderation, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); @@ -755,123 +462,12 @@ static char *RlmtMode[SK_MAX_CARD_PARAM] = {"", }; #endif -static int debug = 0; /* not used */ -static int options[SK_MAX_CARD_PARAM] = {0, }; /* not used */ - static int IntsPerSec[SK_MAX_CARD_PARAM]; static char *Moderation[SK_MAX_CARD_PARAM]; static char *ModerationMask[SK_MAX_CARD_PARAM]; static char *AutoSizing[SK_MAX_CARD_PARAM]; static char *Stats[SK_MAX_CARD_PARAM]; - -/***************************************************************************** - * - * skge_init_module - module initialization function - * - * Description: - * Very simple, only call skge_probe and return approriate result. - * - * Returns: - * 0, if everything is ok - * !=0, on error - */ -static int __init skge_init_module(void) -{ - int cards; - SkGeRootDev = NULL; - - /* just to avoid warnings ... */ - debug = 0; - options[0] = 0; - - cards = skge_probe(); - if (cards == 0) { - printk("sk98lin: No adapter found.\n"); - } - return cards ? 0 : -ENODEV; -} /* skge_init_module */ - - -/***************************************************************************** - * - * skge_cleanup_module - module unload function - * - * Description: - * Disable adapter if it is still running, free resources, - * free device struct. - * - * Returns: N/A - */ -static void __exit skge_cleanup_module(void) -{ -DEV_NET *pNet; -SK_AC *pAC; -struct SK_NET_DEVICE *next; -unsigned long Flags; -SK_EVPARA EvPara; - - while (SkGeRootDev) { - pNet = (DEV_NET*) SkGeRootDev->priv; - pAC = pNet->pAC; - next = pAC->Next; - - netif_stop_queue(SkGeRootDev); - SkGeYellowLED(pAC, pAC->IoBase, 0); - - if(pAC->BoardLevel == SK_INIT_RUN) { - /* board is still alive */ - spin_lock_irqsave(&pAC->SlowPathLock, Flags); - EvPara.Para32[0] = 0; - EvPara.Para32[1] = -1; - SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); - EvPara.Para32[0] = 1; - EvPara.Para32[1] = -1; - SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); - SkEventDispatcher(pAC, pAC->IoBase); - /* disable interrupts */ - SK_OUT32(pAC->IoBase, B0_IMSK, 0); - SkGeDeInit(pAC, pAC->IoBase); - spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); - pAC->BoardLevel = SK_INIT_DATA; - /* We do NOT check here, if IRQ was pending, of course*/ - } - - if(pAC->BoardLevel == SK_INIT_IO) { - /* board is still alive */ - SkGeDeInit(pAC, pAC->IoBase); - pAC->BoardLevel = SK_INIT_DATA; - } - - if ((pAC->GIni.GIMacsFound == 2) && pAC->RlmtNets == 2){ - unregister_netdev(pAC->dev[1]); - free_netdev(pAC->dev[1]); - } - - FreeResources(SkGeRootDev); - - SkGeRootDev->get_stats = NULL; - /* - * otherwise unregister_netdev calls get_stats with - * invalid IO ... 
:-( - */ - unregister_netdev(SkGeRootDev); - free_netdev(SkGeRootDev); - kfree(pAC); - SkGeRootDev = next; - } - -#ifdef CONFIG_PROC_FS - /* clear proc-dir */ - remove_proc_entry(pSkRootDir->name, proc_net); -#endif - -} /* skge_cleanup_module */ - -module_init(skge_init_module); -module_exit(skge_cleanup_module); - - /***************************************************************************** * * SkGeBoardInit - do level 0 and 1 initialization @@ -3094,8 +2690,7 @@ SkEventDispatcher(pAC, pAC->IoBase); for (i=0; iGIni.GIMacsFound; i++) { - spin_lock_irqsave( - &pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock, Flags); + spin_lock(&pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock); netif_stop_queue(pAC->dev[i]); } @@ -4774,12 +4369,10 @@ spin_lock_irqsave( &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, Flags); - spin_lock_irqsave( - &pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock, Flags); + spin_lock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock); SkGeStopPort(pAC, IoC, FromPort, SK_STOP_ALL, SK_SOFT_RST); SkGeStopPort(pAC, IoC, ToPort, SK_STOP_ALL, SK_SOFT_RST); - spin_unlock_irqrestore( - &pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock, Flags); + spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock); spin_unlock_irqrestore( &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, Flags); @@ -4792,8 +4385,7 @@ spin_lock_irqsave( &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, Flags); - spin_lock_irqsave( - &pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock, Flags); + spin_lock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock); pAC->ActivePort = ToPort; #if 0 SetQueueSizes(pAC); @@ -4808,8 +4400,7 @@ pAC, pAC->ActivePort, DualNet)) { - spin_unlock_irqrestore( - &pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock, Flags); + spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock); spin_unlock_irqrestore( &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, Flags); @@ -4835,8 +4426,7 @@ SkGePollTxD(pAC, IoC, ToPort, SK_TRUE); ClearAndStartRx(pAC, FromPort); ClearAndStartRx(pAC, ToPort); - spin_unlock_irqrestore( - &pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock, Flags); + spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock); spin_unlock_irqrestore( &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, Flags); @@ -5311,8 +4901,316 @@ #endif -/******************************************************************************* - * - * End of file - * - ******************************************************************************/ +static int __devinit skge_probe_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + SK_AC *pAC; + DEV_NET *pNet = NULL; + struct net_device *dev = NULL; +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *pProcFile; +#endif + static int boards_found = 0; + int error = -ENODEV; + + if (pci_enable_device(pdev)) + goto out; + + /* Configure DMA attributes. 
*/ + if (pci_set_dma_mask(pdev, (u64) 0xffffffffffffffffULL) && + pci_set_dma_mask(pdev, (u64) 0xffffffff)) + goto out_disable_device; + + + if ((dev = alloc_etherdev(sizeof(DEV_NET))) == NULL) { + printk(KERN_ERR "Unable to allocate etherdev " + "structure!\n"); + goto out_disable_device; + } + + pNet = dev->priv; + pNet->pAC = kmalloc(sizeof(SK_AC), GFP_KERNEL); + if (!pNet->pAC) { + printk(KERN_ERR "Unable to allocate adapter " + "structure!\n"); + goto out_free_netdev; + } + + memset(pNet->pAC, 0, sizeof(SK_AC)); + pAC = pNet->pAC; + pAC->PciDev = pdev; + pAC->PciDevId = pdev->device; + pAC->dev[0] = dev; + pAC->dev[1] = dev; + sprintf(pAC->Name, "SysKonnect SK-98xx"); + pAC->CheckQueue = SK_FALSE; + + pNet->Mtu = 1500; + pNet->Up = 0; + dev->irq = pdev->irq; + error = SkGeInitPCI(pAC); + if (error) { + printk("SKGE: PCI setup failed: %i\n", error); + goto out_free_netdev; + } + + SET_MODULE_OWNER(dev); + dev->open = &SkGeOpen; + dev->stop = &SkGeClose; + dev->hard_start_xmit = &SkGeXmit; + dev->get_stats = &SkGeStats; + dev->last_stats = &SkGeStats; + dev->set_multicast_list = &SkGeSetRxMode; + dev->set_mac_address = &SkGeSetMacAddr; + dev->do_ioctl = &SkGeIoctl; + dev->change_mtu = &SkGeChangeMtu; + dev->flags &= ~IFF_RUNNING; + SET_NETDEV_DEV(dev, &pdev->dev); + +#ifdef SK_ZEROCOPY +#ifdef USE_SK_TX_CHECKSUM + if (pAC->ChipsetType) { + /* Use only if yukon hardware */ + /* SK and ZEROCOPY - fly baby... */ + dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; + } +#endif +#endif + + pAC->Index = boards_found++; + + if (SkGeBoardInit(dev, pAC)) + goto out_free_netdev; + + /* Register net device */ + if (register_netdev(dev)) { + printk(KERN_ERR "SKGE: Could not register device.\n"); + goto out_free_resources; + } + + /* Print adapter specific string from vpd */ + ProductStr(pAC); + printk("%s: %s\n", dev->name, pAC->DeviceStr); + + /* Print configuration settings */ + printk(" PrefPort:%c RlmtMode:%s\n", + 'A' + pAC->Rlmt.Net[0].Port[pAC->Rlmt.Net[0].PrefPort]->PortNumber, + (pAC->RlmtMode==0) ? "Check Link State" : + ((pAC->RlmtMode==1) ? "Check Link State" : + ((pAC->RlmtMode==3) ? "Check Local Port" : + ((pAC->RlmtMode==7) ? "Check Segmentation" : + ((pAC->RlmtMode==17) ? "Dual Check Link State" :"Error"))))); + + SkGeYellowLED(pAC, pAC->IoBase, 1); + + + memcpy(&dev->dev_addr, &pAC->Addr.Net[0].CurrentMacAddress, 6); + +#ifdef CONFIG_PROC_FS + pProcFile = create_proc_entry(dev->name, S_IRUGO, pSkRootDir); + if (pProcFile) { + pProcFile->proc_fops = &sk_proc_fops; + pProcFile->data = dev; + pProcFile->owner = THIS_MODULE; + } +#endif + + pNet->PortNr = 0; + pNet->NetNr = 0; + + boards_found++; + + /* More than one port found */ + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + if ((dev = alloc_etherdev(sizeof(DEV_NET))) == 0) { + printk(KERN_ERR "Unable to allocate etherdev " + "structure!\n"); + goto out; + } + + pAC->dev[1] = dev; + pNet = dev->priv; + pNet->PortNr = 1; + pNet->NetNr = 1; + pNet->pAC = pAC; + pNet->Mtu = 1500; + pNet->Up = 0; + + dev->open = &SkGeOpen; + dev->stop = &SkGeClose; + dev->hard_start_xmit = &SkGeXmit; + dev->get_stats = &SkGeStats; + dev->last_stats = &SkGeStats; + dev->set_multicast_list = &SkGeSetRxMode; + dev->set_mac_address = &SkGeSetMacAddr; + dev->do_ioctl = &SkGeIoctl; + dev->change_mtu = &SkGeChangeMtu; + dev->flags &= ~IFF_RUNNING; + +#ifdef SK_ZEROCOPY +#ifdef USE_SK_TX_CHECKSUM + if (pAC->ChipsetType) { + /* SG and ZEROCOPY - fly baby...
*/ + dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; + } +#endif +#endif + + if (register_netdev(dev)) { + printk(KERN_ERR "SKGE: Could not register device.\n"); + free_netdev(dev); + pAC->dev[1] = pAC->dev[0]; + } else { +#ifdef CONFIG_PROC_FS + pProcFile = create_proc_entry(dev->name, S_IRUGO, + pSkRootDir); + if (pProcFile) { + pProcFile->proc_fops = &sk_proc_fops; + pProcFile->data = dev; + pProcFile->owner = THIS_MODULE; + } +#endif + + memcpy(&dev->dev_addr, + &pAC->Addr.Net[1].CurrentMacAddress, 6); + + printk("%s: %s\n", dev->name, pAC->DeviceStr); + printk(" PrefPort:B RlmtMode:Dual Check Link State\n"); + } + } + + /* Save the hardware revision */ + pAC->HWRevision = (((pAC->GIni.GIPciHwRev >> 4) & 0x0F)*10) + + (pAC->GIni.GIPciHwRev & 0x0F); + + /* Set driver globals */ + pAC->Pnmi.pDriverFileName = DRIVER_FILE_NAME; + pAC->Pnmi.pDriverReleaseDate = DRIVER_REL_DATE; + + memset(&pAC->PnmiBackup, 0, sizeof(SK_PNMI_STRUCT_DATA)); + memcpy(&pAC->PnmiBackup, &pAC->PnmiStruct, sizeof(SK_PNMI_STRUCT_DATA)); + + pci_set_drvdata(pdev, dev); + return 0; + + out_free_resources: + FreeResources(dev); + out_free_netdev: + free_netdev(dev); + out_disable_device: + pci_disable_device(pdev); + out: + return error; +} + +static void __devexit skge_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + DEV_NET *pNet = (DEV_NET *) dev->priv; + SK_AC *pAC = pNet->pAC; + int have_second_mac = 0; + + if ((pAC->GIni.GIMacsFound == 2) && pAC->RlmtNets == 2) + have_second_mac = 1; + + unregister_netdev(dev); + if (have_second_mac) + unregister_netdev(pAC->dev[1]); + + SkGeYellowLED(pAC, pAC->IoBase, 0); + + if (pAC->BoardLevel == SK_INIT_RUN) { + SK_EVPARA EvPara; + unsigned long Flags; + + /* board is still alive */ + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + EvPara.Para32[0] = 0; + EvPara.Para32[1] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); + EvPara.Para32[0] = 1; + EvPara.Para32[1] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); + SkEventDispatcher(pAC, pAC->IoBase); + /* disable interrupts */ + SK_OUT32(pAC->IoBase, B0_IMSK, 0); + SkGeDeInit(pAC, pAC->IoBase); + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + pAC->BoardLevel = SK_INIT_DATA; + /* We do NOT check here, if IRQ was pending, of course*/ + } + + if (pAC->BoardLevel == SK_INIT_IO) { + /* board is still alive */ + SkGeDeInit(pAC, pAC->IoBase); + pAC->BoardLevel = SK_INIT_DATA; + } + + FreeResources(dev); + free_netdev(dev); + if (have_second_mac) + free_netdev(pAC->dev[1]); + kfree(pAC); +} + +static struct pci_device_id skge_pci_tbl[] = { + { PCI_VENDOR_ID_3COM, 0x1700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_3COM, 0x80eb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_SYSKONNECT, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_SYSKONNECT, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_DLINK, 0x4c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_MARVELL, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, +#if 0 /* don't handle Yukon2 cards at the moment -- mlindner@syskonnect.de */ + { PCI_VENDOR_ID_MARVELL, 0x4360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_MARVELL, 0x4361, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, +#endif + { PCI_VENDOR_ID_MARVELL, 0x5005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_CNET, 0x434e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_LINKSYS, 0x1064, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 0, } +}; + +static 
struct pci_driver skge_driver = { + .name = "skge", + .id_table = skge_pci_tbl, + .probe = skge_probe_one, + .remove = __devexit_p(skge_remove_one), +}; + +static int __init skge_init(void) +{ + int error; + + memcpy(&SK_Root_Dir_entry, BOOT_STRING, sizeof(SK_Root_Dir_entry) - 1); + +#ifdef CONFIG_PROC_FS + pSkRootDir = proc_mkdir(SK_Root_Dir_entry, proc_net); + if (!pSkRootDir) { + printk(KERN_WARNING "Unable to create /proc/net/%s", + SK_Root_Dir_entry); + return -ENOMEM; + } + pSkRootDir->owner = THIS_MODULE; +#endif + + error = pci_module_init(&skge_driver); + if (error) { +#ifdef CONFIG_PROC_FS + remove_proc_entry(pSkRootDir->name, proc_net); +#endif + } + + return error; +} + +static void __exit skge_exit(void) +{ + pci_unregister_driver(&skge_driver); +#ifdef CONFIG_PROC_FS + remove_proc_entry(pSkRootDir->name, proc_net); +#endif +} + +module_init(skge_init); +module_exit(skge_exit); diff -Nru a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c --- a/drivers/net/via-rhine.c 2004-07-13 12:46:19 -07:00 +++ b/drivers/net/via-rhine.c 2004-07-13 12:46:19 -07:00 @@ -125,11 +125,16 @@ LK1.1.19 (Roger Luethi) - Increase Tx threshold for unspecified errors + LK1.2.0-2.6 (Roger Luethi) + - Massive clean-up + - Rewrite PHY, media handling (remove options, full_duplex, backoff) + - Fix Tx engine race for good + */ #define DRV_NAME "via-rhine" -#define DRV_VERSION "1.1.20-2.6" -#define DRV_RELDATE "May-23-2004" +#define DRV_VERSION "1.2.0-2.6" +#define DRV_RELDATE "June-10-2004" /* A few user-configurable values. @@ -142,22 +147,10 @@ Setting to > 1518 effectively disables this feature. */ static int rx_copybreak; -/* Select a backoff algorithm (Ethernet capture effect) */ -static int backoff; - -/* Used to pass the media type, etc. - Both 'options[]' and 'full_duplex[]' should exist for driver - interoperability. - The media type is usually passed in 'options[]'. - The default is autonegotiation for speed and duplex. - This should rarely be overridden. - Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps. - Use option values 0x10 and 0x100 for forcing half duplex fixed speed. - Use option values 0x20 and 0x200 for forcing full duplex operation. -*/ -#define MAX_UNITS 8 /* More are supported, limit only on options */ -static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; -static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; +/* + * In case you are looking for 'options[]' or 'full_duplex[]', they + * are gone. Use ethtool(8) instead. + */ /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). The Rhine has a 64 element 8390-like hash table. */ @@ -210,9 +203,6 @@ static char version[] __devinitdata = KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"; -static char shortname[] = DRV_NAME; - - /* This driver was written to use PCI memory space. Some early versions of the Rhine may only work correctly with I/O space accesses. 
*/ #ifdef CONFIG_VIA_RHINE_MMIO @@ -239,15 +229,9 @@ MODULE_PARM(max_interrupt_work, "i"); MODULE_PARM(debug, "i"); MODULE_PARM(rx_copybreak, "i"); -MODULE_PARM(backoff, "i"); -MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i"); -MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i"); MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt"); MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)"); MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); -MODULE_PARM_DESC(backoff, "VIA Rhine: Bits 0-3: backoff algorithm"); -MODULE_PARM_DESC(options, "VIA Rhine: Bits 0-3: media type, bit 17: full duplex"); -MODULE_PARM_DESC(full_duplex, "VIA Rhine full duplex setting(s) (1)"); /* Theory of Operation @@ -350,24 +334,24 @@ enum rhine_revs { VT86C100A = 0x00, + VTunknown0 = 0x20, VT6102 = 0x40, VT8231 = 0x50, /* Integrated MAC */ VT8233 = 0x60, /* Integrated MAC */ VT8235 = 0x74, /* Integrated MAC */ VT8237 = 0x78, /* Integrated MAC */ - VTunknown0 = 0x7C, + VTunknown1 = 0x7C, VT6105 = 0x80, VT6105_B0 = 0x83, VT6105L = 0x8A, VT6107 = 0x8C, - VTunknown1 = 0x8E, + VTunknown2 = 0x8E, VT6105M = 0x90, }; enum rhine_quirks { rqWOL = 0x0001, /* Wake-On-LAN support */ rqForceReset = 0x0002, - rqDavicomPhy = 0x0020, rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */ rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */ rqRhineI = 0x0100, /* See comment below */ @@ -395,6 +379,7 @@ /* Offsets to the device registers. */ enum register_offsets { StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08, + ChipCmd1=0x09, IntrStatus=0x0C, IntrEnable=0x0E, MulticastFilter0=0x10, MulticastFilter1=0x14, RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54, @@ -403,8 +388,8 @@ ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B, RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81, StickyHW=0x83, IntrStatus2=0x84, - WOLcrSet=0xA0, WOLcrClr=0xA4, WOLcrClr1=0xA6, - WOLcgClr=0xA7, + WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4, + WOLcrClr1=0xA6, WOLcgClr=0xA7, PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD, }; @@ -436,6 +421,15 @@ IntrTxErrSummary=0x082218, }; +/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */ +enum wol_bits { + WOLucast = 0x10, + WOLmagic = 0x20, + WOLbmcast = 0x30, + WOLlnkon = 0x40, + WOLlnkoff = 0x80, +}; + /* The Rx and Tx buffer descriptors. */ struct rx_desc { s32 rx_status; @@ -464,13 +458,12 @@ /* Bits in ChipCmd. */ enum chip_cmd_bits { - CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008, - CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040, - CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400, - CmdNoTxPoll=0x0800, CmdReset=0x8000, + CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08, + CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40, + Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04, + Cmd1NoTxPoll=0x08, Cmd1Reset=0x80, }; -#define MAX_MII_CNT 4 struct rhine_private { /* Descriptor rings */ struct rx_desc *rx_ring; @@ -493,7 +486,6 @@ struct pci_dev *pdev; struct net_device_stats stats; - struct timer_list timer; /* Media monitoring timer. */ spinlock_t lock; /* Frequently used values: keep some adjacent for cache effect. */ @@ -502,23 +494,16 @@ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ unsigned int cur_tx, dirty_tx; unsigned int rx_buf_sz; /* Based on MTU+slack. */ - u16 chip_cmd; /* Current setting for ChipCmd */ + u8 wolopts; - /* These values are keep track of the transceiver/media in use. 
*/ u8 tx_thresh, rx_thresh; - /* MII transceiver section. */ - unsigned char phys[MAX_MII_CNT]; /* MII device addresses. */ - unsigned int mii_cnt; /* number of MIIs found, but only the first one is used */ - u16 mii_status; /* last read MII status */ struct mii_if_info mii_if; }; static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *dev, int phy_id, int location, int value); static int rhine_open(struct net_device *dev); -static void rhine_check_duplex(struct net_device *dev); -static void rhine_timer(unsigned long data); static void rhine_tx_timeout(struct net_device *dev); static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); @@ -530,6 +515,16 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static struct ethtool_ops netdev_ethtool_ops; static int rhine_close(struct net_device *dev); +static void rhine_shutdown (struct device *gdev); + +#define RHINE_WAIT_FOR(condition) do { \ + int i=1024; \ + while (!(condition) && --i) \ + ; \ + if (debug > 1 && i < 512) \ + printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n", \ + DRV_NAME, 1024-i, __func__, __LINE__); \ +} while(0) static inline u32 get_intr_status(struct net_device *dev) { @@ -546,12 +541,13 @@ /* * Get power related registers into sane state. - * Returns content of power-event (WOL) registers. + * Notify user about past WOL event. */ static void rhine_power_init(struct net_device *dev) { long ioaddr = dev->base_addr; struct rhine_private *rp = netdev_priv(dev); + u16 wolstat; if (rp->quirks & rqWOL) { /* Make sure chip is in power state D0 */ @@ -566,63 +562,109 @@ if (rp->quirks & rq6patterns) writeb(0x03, ioaddr + WOLcrClr1); + /* Save power-event status bits */ + wolstat = readb(ioaddr + PwrcsrSet); + if (rp->quirks & rq6patterns) + wolstat |= (readb(ioaddr + PwrcsrSet1) & 0x03) << 8; + /* Clear power-event status bits */ writeb(0xFF, ioaddr + PwrcsrClr); if (rp->quirks & rq6patterns) writeb(0x03, ioaddr + PwrcsrClr1); + + if (wolstat) { + char *reason; + switch (wolstat) { + case WOLmagic: + reason = "Magic packet"; + break; + case WOLlnkon: + reason = "Link went up"; + break; + case WOLlnkoff: + reason = "Link went down"; + break; + case WOLucast: + reason = "Unicast packet"; + break; + case WOLbmcast: + reason = "Multicast/broadcast packet"; + break; + default: + reason = "Unknown"; + } + printk("%s: Woke system up. Reason: %s.\n", + DRV_NAME, reason); + } } } -static void wait_for_reset(struct net_device *dev, u32 quirks, char *name) +static void rhine_chip_reset(struct net_device *dev) { long ioaddr = dev->base_addr; - int boguscnt = 20; + struct rhine_private *rp = netdev_priv(dev); + writeb(Cmd1Reset, ioaddr + ChipCmd1); IOSYNC; - if (readw(ioaddr + ChipCmd) & CmdReset) { + if (readb(ioaddr + ChipCmd1) & Cmd1Reset) { printk(KERN_INFO "%s: Reset not complete yet. " - "Trying harder.\n", name); + "Trying harder.\n", DRV_NAME); - /* Rhine-II needs to be forced sometimes */ - if (quirks & rqForceReset) + /* Force reset */ + if (rp->quirks & rqForceReset) writeb(0x40, ioaddr + MiscCmd); - /* VT86C100A may need long delay after reset (dlink) */ - /* Seen on Rhine-II as well (rl) */ - while ((readw(ioaddr + ChipCmd) & CmdReset) && --boguscnt) - udelay(5); - + /* Reset can take somewhat longer (rare) */ + RHINE_WAIT_FOR(!(readb(ioaddr + ChipCmd1) & Cmd1Reset)); } if (debug > 1) - printk(KERN_INFO "%s: Reset %s.\n", name, - boguscnt ? 
"succeeded" : "failed"); + printk(KERN_INFO "%s: Reset %s.\n", dev->name, + (readb(ioaddr + ChipCmd1) & Cmd1Reset) ? + "failed" : "succeeded"); } #ifdef USE_MMIO -static void __devinit enable_mmio(long ioaddr, u32 quirks) +static void __devinit enable_mmio(long pioaddr, u32 quirks) { int n; if (quirks & rqRhineI) { /* More recent docs say that this bit is reserved ... */ - n = inb(ioaddr + ConfigA) | 0x20; - outb(n, ioaddr + ConfigA); + n = inb(pioaddr + ConfigA) | 0x20; + outb(n, pioaddr + ConfigA); } else { - n = inb(ioaddr + ConfigD) | 0x80; - outb(n, ioaddr + ConfigD); + n = inb(pioaddr + ConfigD) | 0x80; + outb(n, pioaddr + ConfigD); } } #endif -static void __devinit reload_eeprom(long ioaddr) +/* + * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM + * (plus 0x6C for Rhine-I/II) + */ +static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev) { - int i; - outb(0x20, ioaddr + MACRegEEcsr); - /* Typically 2 cycles to reload. */ - for (i = 0; i < 150; i++) - if (! (inb(ioaddr + MACRegEEcsr) & 0x20)) - break; + long ioaddr = dev->base_addr; + struct rhine_private *rp = netdev_priv(dev); + + outb(0x20, pioaddr + MACRegEEcsr); + RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20)); + +#ifdef USE_MMIO + /* + * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable + * MMIO. If reloading EEPROM was done first this could be avoided, but + * it is not known if that still works with the "win98-reboot" problem. + */ + enable_mmio(pioaddr, rp->quirks); +#endif + + /* Turn off EEPROM-controlled wake-up (magic packet) */ + if (rp->quirks & rqWOL) + writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA); + } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -634,23 +676,34 @@ } #endif +static void rhine_hw_init(struct net_device *dev, long pioaddr) +{ + struct rhine_private *rp = netdev_priv(dev); + + /* Reset the chip to erase previous misconfiguration. */ + rhine_chip_reset(dev); + + /* Rhine-I needs extra time to recuperate before EEPROM reload */ + if (rp->quirks & rqRhineI) + msleep(5); + + /* Reload EEPROM controlled bytes cleared by soft reset */ + rhine_reload_eeprom(pioaddr, dev); +} + static int __devinit rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; struct rhine_private *rp; - int i, option, rc; + int i, rc; u8 pci_rev; u32 quirks; - static int card_idx = -1; - long ioaddr; + long pioaddr; long memaddr; - int io_size; - int phy, phy_idx = 0; -#ifdef USE_MMIO - long ioaddr0; -#endif - const char *name; + long ioaddr; + int io_size, phy_id; + const char *name, *mname; /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -659,26 +712,47 @@ printk(version); #endif - card_idx++; - option = card_idx < MAX_UNITS ? 
options[card_idx] : 0; pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev); io_size = 256; - if (pci_rev < VT6102) { - quirks = rqRhineI | rqDavicomPhy; + phy_id = 0; + quirks = 0; + name = "Rhine"; + mname = "unknown"; + if (pci_rev < VTunknown0) { + quirks = rqRhineI; io_size = 128; - name = "VT86C100A Rhine"; + mname = "VT86C100A"; } - else { + else if (pci_rev >= VT6102) { quirks = rqWOL | rqForceReset; if (pci_rev < VT6105) { name = "Rhine II"; quirks |= rqStatusWBRace; /* Rhine-II exclusive */ + if (pci_rev < VT8231) + mname = "VT6102"; + else if (pci_rev < VT8233) + mname = "VT8231"; + else if (pci_rev < VT8235) + mname = "VT8233"; + else if (pci_rev < VT8237) + mname = "VT8235"; + else if (pci_rev < VTunknown1) + mname = "VT8237"; } else { name = "Rhine III"; + phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */ if (pci_rev >= VT6105_B0) quirks |= rq6patterns; + if (pci_rev < VT6105L) + mname = "VT6105"; + else if (pci_rev < VT6107) + mname = "VT6105L"; + else if (pci_rev < VT6105M) + mname = "VT6107"; + else if (pci_rev >= VT6105M) + mname = "Management Adapter VT6105M"; } } @@ -702,28 +776,26 @@ goto err_out; } - ioaddr = pci_resource_start(pdev, 0); + pioaddr = pci_resource_start(pdev, 0); memaddr = pci_resource_start(pdev, 1); pci_set_master(pdev); - dev = alloc_etherdev(sizeof(*rp)); - if (dev == NULL) { + dev = alloc_etherdev(sizeof(struct rhine_private)); + if (!dev) { rc = -ENOMEM; - printk(KERN_ERR "init_ethernet failed for card #%d\n", - card_idx); + printk(KERN_ERR "alloc_etherdev failed\n"); goto err_out; } SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); - rc = pci_request_regions(pdev, shortname); + rc = pci_request_regions(pdev, DRV_NAME); if (rc) goto err_out_free_netdev; #ifdef USE_MMIO - ioaddr0 = ioaddr; - enable_mmio(ioaddr0, quirks); + enable_mmio(pioaddr, quirks); ioaddr = (long) ioremap(memaddr, io_size); if (!ioaddr) { @@ -737,7 +809,7 @@ i = 0; while (mmio_verify_registers[i]) { int reg = mmio_verify_registers[i++]; - unsigned char a = inb(ioaddr0+reg); + unsigned char a = inb(pioaddr+reg); unsigned char b = readb(ioaddr+reg); if (a != b) { rc = -EIO; @@ -746,65 +818,41 @@ goto err_out_unmap; } } +#else + ioaddr = pioaddr; #endif /* USE_MMIO */ + dev->base_addr = ioaddr; + rp = netdev_priv(dev); + rp->quirks = quirks; + /* Get chip registers into a sane state */ rhine_power_init(dev); - - /* Reset the chip to erase previous misconfiguration. */ - writew(CmdReset, ioaddr + ChipCmd); - - wait_for_reset(dev, quirks, shortname); - - /* Reload the station address from the EEPROM. */ -#ifdef USE_MMIO - reload_eeprom(ioaddr0); - /* Reloading from eeprom overwrites cfgA-D, so we must re-enable MMIO. - If reload_eeprom() was done first this could be avoided, but it is - not known if that still works with the "win98-reboot" problem. */ - enable_mmio(ioaddr0, quirks); -#else - reload_eeprom(ioaddr); -#endif + rhine_hw_init(dev, pioaddr); for (i = 0; i < 6; i++) dev->dev_addr[i] = readb(ioaddr + StationAddr + i); if (!is_valid_ether_addr(dev->dev_addr)) { rc = -EIO; - printk(KERN_ERR "Invalid MAC address for card #%d\n", card_idx); + printk(KERN_ERR "Invalid MAC address\n"); goto err_out_unmap; } - if (quirks & rqWOL) { - /* - * for 3065D, EEPROM reloaded will cause bit 0 in MAC_REG_CFGA - * turned on. it makes MAC receive magic packet - * automatically. So, we turn it off. 
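/*
 * Illustrative aside (not patch text): the removed D-Link comment below
 * survives in spirit in rhine_reload_eeprom() above, where the EEPROM
 * reload's side effect is undone with a read-modify-write that clears
 * ConfigA bit 0 on WOL-capable chips:
 *
 *	if (rp->quirks & rqWOL)
 *		writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
 */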
(D-Link) - */ - writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA); - } - - /* Select backoff algorithm */ - if (backoff) - writeb(readb(ioaddr + ConfigD) & (0xF0 | backoff), - ioaddr + ConfigD); + /* For Rhine-I/II, phy_id is loaded from EEPROM */ + if (!phy_id) + phy_id = readb(ioaddr + 0x6C); dev->irq = pdev->irq; - rp = netdev_priv(dev); spin_lock_init(&rp->lock); rp->pdev = pdev; - rp->quirks = quirks; rp->mii_if.dev = dev; rp->mii_if.mdio_read = mdio_read; rp->mii_if.mdio_write = mdio_write; rp->mii_if.phy_id_mask = 0x1f; rp->mii_if.reg_num_mask = 0x1f; - if (dev->mem_start) - option = dev->mem_start; - /* The chip-specific entries in the device structure. */ dev->open = rhine_open; dev->hard_start_xmit = rhine_start_tx; @@ -826,22 +874,8 @@ if (rc) goto err_out_unmap; - /* The lower four bits are the media type. */ - if (option > 0) { - if (option & 0x220) - rp->mii_if.full_duplex = 1; - } - if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0) - rp->mii_if.full_duplex = 1; - - if (rp->mii_if.full_duplex) { - printk(KERN_INFO "%s: Set to forced full duplex, " - "autonegotiation disabled.\n", dev->name); - rp->mii_if.force_media = 1; - } - - printk(KERN_INFO "%s: VIA %s at 0x%lx, ", - dev->name, name, + printk(KERN_INFO "%s: VIA %s (%s) at 0x%lx, ", + dev->name, name, mname, #ifdef USE_MMIO memaddr #else @@ -855,17 +889,15 @@ pci_set_drvdata(pdev, dev); - rp->phys[0] = 1; /* Standard for this chip. */ - for (phy = 1; phy < 32 && phy_idx < MAX_MII_CNT; phy++) { - int mii_status = mdio_read(dev, phy, 1); + { + int mii_status = mdio_read(dev, phy_id, 1); if (mii_status != 0xffff && mii_status != 0x0000) { - rp->phys[phy_idx++] = phy; - rp->mii_if.advertising = mdio_read(dev, phy, 4); + rp->mii_if.advertising = mdio_read(dev, phy_id, 4); printk(KERN_INFO "%s: MII PHY found at address " "%d, status 0x%4.4x advertising %4.4x " - "Link %4.4x.\n", dev->name, phy, + "Link %4.4x.\n", dev->name, phy_id, mii_status, rp->mii_if.advertising, - mdio_read(dev, phy, 5)); + mdio_read(dev, phy_id, 5)); /* set IFF_RUNNING */ if (mii_status & BMSR_LSTATUS) @@ -873,27 +905,9 @@ else netif_carrier_off(dev); - break; - } - } - rp->mii_cnt = phy_idx; - rp->mii_if.phy_id = rp->phys[0]; - - /* Allow forcing the media type. */ - if (option > 0) { - if (option & 0x220) - rp->mii_if.full_duplex = 1; - if (option & 0x330) { - printk(KERN_INFO " Forcing %dMbs %s-duplex " - "operation.\n", - (option & 0x300 ? 100 : 10), - (option & 0x220 ? "full" : "half")); - if (rp->mii_cnt) - mdio_write(dev, rp->phys[0], MII_BMCR, - ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */ - ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */ } } + rp->mii_if.phy_id = phy_id; return 0; @@ -1065,6 +1079,21 @@ } } +static void rhine_check_media(struct net_device *dev, unsigned int init_media) +{ + struct rhine_private *rp = netdev_priv(dev); + long ioaddr = dev->base_addr; + + mii_check_media(&rp->mii_if, debug, init_media); + + if (rp->mii_if.full_duplex) + writeb(readb(ioaddr + ChipCmd1) | Cmd1FDuplex, + ioaddr + ChipCmd1); + else + writeb(readb(ioaddr + ChipCmd1) & ~Cmd1FDuplex, + ioaddr + ChipCmd1); +} + static void init_registers(struct net_device *dev) { struct rhine_private *rp = netdev_priv(dev); @@ -1080,7 +1109,6 @@ writeb(0x20, ioaddr + TxConfig); rp->tx_thresh = 0x20; rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). 
*/ - rp->mii_if.full_duplex = 0; writel(rp->rx_ring_dma, ioaddr + RxRingPtr); writel(rp->tx_ring_dma, ioaddr + TxRingPtr); @@ -1094,17 +1122,44 @@ IntrPCIErr | IntrStatsMax | IntrLinkChange, ioaddr + IntrEnable); - rp->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll; - if (rp->mii_if.force_media) - rp->chip_cmd |= CmdFDuplex; - writew(rp->chip_cmd, ioaddr + ChipCmd); - - rhine_check_duplex(dev); - - /* The LED outputs of various MII xcvrs should be configured. */ - /* For NS or Mison phys, turn on bit 1 in register 0x17 */ - mdio_write(dev, rp->phys[0], 0x17, mdio_read(dev, rp->phys[0], 0x17) | - 0x0001); + writew(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8), + ioaddr + ChipCmd); + rhine_check_media(dev, 1); +} + +/* Enable MII link status auto-polling (required for IntrLinkChange) */ +static void rhine_enable_linkmon(long ioaddr) +{ + writeb(0, ioaddr + MIICmd); + writeb(MII_BMSR, ioaddr + MIIRegAddr); + writeb(0x80, ioaddr + MIICmd); + + RHINE_WAIT_FOR((readb(ioaddr + MIIRegAddr) & 0x20)); + + writeb(MII_BMSR | 0x40, ioaddr + MIIRegAddr); +} + +/* Disable MII link status auto-polling (required for MDIO access) */ +static void rhine_disable_linkmon(long ioaddr, u32 quirks) +{ + writeb(0, ioaddr + MIICmd); + + if (quirks & rqRhineI) { + writeb(0x01, ioaddr + MIIRegAddr); // MII_BMSR + + /* Can be called from ISR. Evil. */ + mdelay(1); + + /* 0x80 must be set immediately before turning it off */ + writeb(0x80, ioaddr + MIICmd); + + RHINE_WAIT_FOR(readb(ioaddr + MIIRegAddr) & 0x20); + + /* Heh. Now clear 0x80 again. */ + writeb(0, ioaddr + MIICmd); + } + else + RHINE_WAIT_FOR(readb(ioaddr + MIIRegAddr) & 0x80); } /* Read and write over the MII Management Data I/O (MDIO) interface. */ @@ -1112,156 +1167,72 @@ static int mdio_read(struct net_device *dev, int phy_id, int regnum) { long ioaddr = dev->base_addr; - int boguscnt = 1024; + struct rhine_private *rp = netdev_priv(dev); + int result; - /* Wait for a previous command to complete. */ - while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0) - ; - writeb(0x00, ioaddr + MIICmd); + rhine_disable_linkmon(ioaddr, rp->quirks); + + writeb(0, ioaddr + MIICmd); writeb(phy_id, ioaddr + MIIPhyAddr); writeb(regnum, ioaddr + MIIRegAddr); writeb(0x40, ioaddr + MIICmd); /* Trigger read */ - boguscnt = 1024; - while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0) - ; - return readw(ioaddr + MIIData); + RHINE_WAIT_FOR(!(readb(ioaddr + MIICmd) & 0x40)); + result = readw(ioaddr + MIIData); + + rhine_enable_linkmon(ioaddr); + return result; } static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value) { struct rhine_private *rp = netdev_priv(dev); long ioaddr = dev->base_addr; - int boguscnt = 1024; - if (phy_id == rp->phys[0]) { - switch (regnum) { - case MII_BMCR: /* Is user forcing speed/duplex? */ - if (value & 0x9000) /* Autonegotiation. */ - rp->mii_if.force_media = 0; - else - rp->mii_if.full_duplex = (value & 0x0100) ? 1 : 0; - break; - case MII_ADVERTISE: - rp->mii_if.advertising = value; - break; - } - } + rhine_disable_linkmon(ioaddr, rp->quirks); - /* Wait for a previous command to complete. */ - while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0) - ; - writeb(0x00, ioaddr + MIICmd); + writeb(0, ioaddr + MIICmd); writeb(phy_id, ioaddr + MIIPhyAddr); writeb(regnum, ioaddr + MIIRegAddr); writew(value, ioaddr + MIIData); - writeb(0x20, ioaddr + MIICmd); /* Trigger write. 
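/*
 * Sketch of the MDIO access pattern the patch establishes (names taken
 * from the patch; a fragment, not a drop-in function). MII auto-polling
 * must be off while the MDIO registers are programmed, hence the
 * bracketing linkmon calls around every read or write:
 *
 *	rhine_disable_linkmon(ioaddr, rp->quirks);
 *	writeb(phy_id, ioaddr + MIIPhyAddr);
 *	writeb(regnum, ioaddr + MIIRegAddr);
 *	writeb(0x40, ioaddr + MIICmd);			// trigger read
 *	RHINE_WAIT_FOR(!(readb(ioaddr + MIICmd) & 0x40));
 *	result = readw(ioaddr + MIIData);
 *	rhine_enable_linkmon(ioaddr);			// resume auto-poll
 */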
*/ -} + writeb(0x20, ioaddr + MIICmd); /* Trigger write */ + RHINE_WAIT_FOR(!(readb(ioaddr + MIICmd) & 0x20)); + rhine_enable_linkmon(ioaddr); +} static int rhine_open(struct net_device *dev) { struct rhine_private *rp = netdev_priv(dev); long ioaddr = dev->base_addr; - int i; - - /* Reset the chip. */ - writew(CmdReset, ioaddr + ChipCmd); + int rc; - i = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name, + rc = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name, dev); - if (i) - return i; + if (rc) + return rc; if (debug > 1) printk(KERN_DEBUG "%s: rhine_open() irq %d.\n", dev->name, rp->pdev->irq); - i = alloc_ring(dev); - if (i) - return i; + rc = alloc_ring(dev); + if (rc) + return rc; alloc_rbufs(dev); alloc_tbufs(dev); - wait_for_reset(dev, rp->quirks, dev->name); + rhine_chip_reset(dev); init_registers(dev); if (debug > 2) printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x " "MII status: %4.4x.\n", dev->name, readw(ioaddr + ChipCmd), - mdio_read(dev, rp->phys[0], MII_BMSR)); + mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); netif_start_queue(dev); - /* Set the timer to check for link beat. */ - init_timer(&rp->timer); - rp->timer.expires = jiffies + 2 * HZ/100; - rp->timer.data = (unsigned long)dev; - rp->timer.function = &rhine_timer; /* timer handler */ - add_timer(&rp->timer); - return 0; } -static void rhine_check_duplex(struct net_device *dev) -{ - struct rhine_private *rp = netdev_priv(dev); - long ioaddr = dev->base_addr; - int mii_lpa = mdio_read(dev, rp->phys[0], MII_LPA); - int negotiated = mii_lpa & rp->mii_if.advertising; - int duplex; - - if (rp->mii_if.force_media || mii_lpa == 0xffff) - return; - duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040; - if (rp->mii_if.full_duplex != duplex) { - rp->mii_if.full_duplex = duplex; - if (debug) - printk(KERN_INFO "%s: Setting %s-duplex based on " - "MII #%d link partner capability of %4.4x.\n", - dev->name, duplex ? "full" : "half", - rp->phys[0], mii_lpa); - if (duplex) - rp->chip_cmd |= CmdFDuplex; - else - rp->chip_cmd &= ~CmdFDuplex; - writew(rp->chip_cmd, ioaddr + ChipCmd); - } -} - - -static void rhine_timer(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct rhine_private *rp = netdev_priv(dev); - long ioaddr = dev->base_addr; - int next_tick = 10*HZ; - int mii_status; - - if (debug > 3) { - printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n", - dev->name, readw(ioaddr + IntrStatus)); - } - - spin_lock_irq (&rp->lock); - - rhine_check_duplex(dev); - - /* make IFF_RUNNING follow the MII status bit "Link established" */ - mii_status = mdio_read(dev, rp->phys[0], MII_BMSR); - if ((mii_status & BMSR_LSTATUS) != (rp->mii_status & BMSR_LSTATUS)) { - if (mii_status & BMSR_LSTATUS) - netif_carrier_on(dev); - else - netif_carrier_off(dev); - } - rp->mii_status = mii_status; - - spin_unlock_irq(&rp->lock); - - rp->timer.expires = jiffies + next_tick; - add_timer(&rp->timer); -} - - static void rhine_tx_timeout(struct net_device *dev) { struct rhine_private *rp = netdev_priv(dev); @@ -1270,16 +1241,13 @@ printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status " "%4.4x, resetting...\n", dev->name, readw(ioaddr + IntrStatus), - mdio_read(dev, rp->phys[0], MII_BMSR)); + mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); /* protect against concurrent rx interrupts */ disable_irq(rp->pdev->irq); spin_lock(&rp->lock); - /* Reset the chip. 
*/ - writew(CmdReset, ioaddr + ChipCmd); - /* clear all descriptors */ free_tbufs(dev); free_rbufs(dev); @@ -1287,7 +1255,7 @@ alloc_rbufs(dev); /* Reinitialize the hardware. */ - wait_for_reset(dev, rp->quirks, dev->name); + rhine_chip_reset(dev); init_registers(dev); spin_unlock(&rp->lock); @@ -1301,8 +1269,8 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev) { struct rhine_private *rp = netdev_priv(dev); + long ioaddr = dev->base_addr; unsigned entry; - u32 intr_status; /* Caution: the write order is important here, set the field with the "ownership" bits last. */ @@ -1353,14 +1321,9 @@ /* Non-x86 Todo: explicitly flush cache lines here. */ - /* - * Wake the potentially-idle transmit channel unless errors are - * pending (the ISR must sort them out first). - */ - intr_status = get_intr_status(dev); - if ((intr_status & IntrTxErrSummary) == 0) { - writew(CmdTxDemand | rp->chip_cmd, dev->base_addr + ChipCmd); - } + /* Wake the potentially-idle transmit channel */ + writeb(readb(ioaddr + ChipCmd1) | Cmd1TxDemand, + ioaddr + ChipCmd1); IOSYNC; if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN) @@ -1408,11 +1371,10 @@ if (intr_status & (IntrTxErrSummary | IntrTxDone)) { if (intr_status & IntrTxErrSummary) { - int cnt = 20; /* Avoid scavenging before Tx engine turned off */ - while ((readw(ioaddr+ChipCmd) & CmdTxOn) && --cnt) - udelay(5); - if (debug > 2 && !cnt) + RHINE_WAIT_FOR(!(readb(ioaddr+ChipCmd) & CmdTxOn)); + if (debug > 2 && + readb(ioaddr+ChipCmd) & CmdTxOn) printk(KERN_WARNING "%s: " "rhine_interrupt() Tx engine" "still on.\n", dev->name); @@ -1572,10 +1534,6 @@ rp->rx_buf_sz, PCI_DMA_FROMDEVICE); - /* *_IP_COPYSUM isn't defined anywhere and - eth_copy_and_sum is memcpy for all archs so - this is kind of pointless right now - ... or? */ eth_copy_and_sum(skb, rp->rx_skbuff[entry]->tail, pkt_len, 0); @@ -1627,10 +1585,6 @@ } rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); } - - /* Pre-emptively restart Rx engine. */ - writew(readw(dev->base_addr + ChipCmd) | CmdRxOn | CmdRxDemand, - dev->base_addr + ChipCmd); } /* @@ -1664,7 +1618,10 @@ writel(rp->tx_ring_dma + entry * sizeof(struct tx_desc), ioaddr + TxRingPtr); - writew(CmdTxDemand | rp->chip_cmd, ioaddr + ChipCmd); + writeb(readb(ioaddr + ChipCmd) | CmdTxOn, + ioaddr + ChipCmd); + writeb(readb(ioaddr + ChipCmd1) | Cmd1TxDemand, + ioaddr + ChipCmd1); IOSYNC; } else { @@ -1684,20 +1641,8 @@ spin_lock(&rp->lock); - if (intr_status & (IntrLinkChange)) { - if (readb(ioaddr + MIIStatus) & 0x02) { - /* Link failed, restart autonegotiation. 
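/*
 * Illustrative aside (not patch text): the hand-rolled duplex and link
 * handling removed around here collapses into rhine_check_media(),
 * a thin wrapper over the generic MII layer; only the MAC duplex bit
 * still needs manual care:
 *
 *	mii_check_media(&rp->mii_if, debug, init_media);
 *	if (rp->mii_if.full_duplex)
 *		writeb(readb(ioaddr + ChipCmd1) | Cmd1FDuplex,
 *		       ioaddr + ChipCmd1);
 *	else
 *		writeb(readb(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
 *		       ioaddr + ChipCmd1);
 */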
*/ - if (rp->quirks & rqRhineI) - mdio_write(dev, rp->phys[0], MII_BMCR, 0x3300); - } else - rhine_check_duplex(dev); - if (debug) - printk(KERN_ERR "%s: MII status changed: " - "Autonegotiation advertising %4.4x partner " - "%4.4x.\n", dev->name, - mdio_read(dev, rp->phys[0], MII_ADVERTISE), - mdio_read(dev, rp->phys[0], MII_LPA)); - } + if (intr_status & IntrLinkChange) + rhine_check_media(dev, 0); if (intr_status & IntrStatsMax) { rp->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs); rp->stats.rx_missed_errors += readw(ioaddr + RxMissed); @@ -1790,7 +1735,7 @@ i++, mclist = mclist->next) { int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; - mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31)); + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); } writel(mc_filter[0], ioaddr + MulticastFilter0); writel(mc_filter[1], ioaddr + MulticastFilter1); @@ -1856,6 +1801,39 @@ debug = value; } +static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rhine_private *rp = netdev_priv(dev); + + if (!(rp->quirks & rqWOL)) + return; + + spin_lock_irq(&rp->lock); + wol->supported = WAKE_PHY | WAKE_MAGIC | + WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */ + wol->wolopts = rp->wolopts; + spin_unlock_irq(&rp->lock); +} + +static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rhine_private *rp = netdev_priv(dev); + u32 support = WAKE_PHY | WAKE_MAGIC | + WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */ + + if (!(rp->quirks & rqWOL)) + return -EINVAL; + + if (wol->wolopts & ~support) + return -EINVAL; + + spin_lock_irq(&rp->lock); + rp->wolopts = wol->wolopts; + spin_unlock_irq(&rp->lock); + + return 0; +} + static struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, .get_settings = netdev_get_settings, @@ -1864,6 +1842,8 @@ .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, + .get_wol = rhine_get_wol, + .set_wol = rhine_set_wol, .get_sg = ethtool_op_get_sg, .get_tx_csum = ethtool_op_get_tx_csum, }; @@ -1888,8 +1868,6 @@ long ioaddr = dev->base_addr; struct rhine_private *rp = netdev_priv(dev); - del_timer_sync(&rp->timer); - spin_lock_irq(&rp->lock); netif_stop_queue(dev); @@ -1936,12 +1914,51 @@ pci_set_drvdata(pdev, NULL); } +static void rhine_shutdown (struct device *gendev) +{ + struct pci_dev *pdev = to_pci_dev(gendev); + struct net_device *dev = pci_get_drvdata(pdev); + struct rhine_private *rp = netdev_priv(dev); + + long ioaddr = dev->base_addr; + + rhine_power_init(dev); + + /* Make sure we use pattern 0, 1 and not 4, 5 */ + if (rp->quirks & rq6patterns) + writeb(0x04, ioaddr + 0xA7); + + if (rp->wolopts & WAKE_MAGIC) + writeb(WOLmagic, ioaddr + WOLcrSet); + + if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST)) + writeb(WOLbmcast, ioaddr + WOLcgSet); + + if (rp->wolopts & WAKE_PHY) + writeb(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet); + + if (rp->wolopts & WAKE_UCAST) + writeb(WOLucast, ioaddr + WOLcrSet); + + /* Enable legacy WOL (for old motherboards) */ + writeb(0x01, ioaddr + PwcfgSet); + writeb(readb(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW); + + /* Hit power state D3 (sleep) */ + writeb(readb(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); + + /* TODO: Check use of pci_enable_wake() */ + +} static struct pci_driver rhine_driver = { - .name = "via-rhine", + .name = DRV_NAME, .id_table = rhine_pci_tbl, .probe = rhine_init_one, .remove = __devexit_p(rhine_remove_one), + .driver = { + .shutdown = rhine_shutdown, + } }; diff -Nru 
a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c --- a/drivers/net/via-velocity.c 2004-07-13 12:46:19 -07:00 +++ b/drivers/net/via-velocity.c 2004-07-13 12:46:19 -07:00 @@ -78,6 +78,8 @@ #include #include #include +#include +#include #include "via-velocity.h" @@ -226,7 +228,10 @@ VELOCITY_PARAM(int_works, "Number of packets per interrupt services"); -static int velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent); +static int rx_copybreak = 200; +MODULE_PARM(rx_copybreak, "i"); +MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); + static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info); static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev); static void velocity_print_info(struct velocity_info *vptr); @@ -238,10 +243,8 @@ static struct net_device_stats *velocity_get_stats(struct net_device *dev); static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int velocity_close(struct net_device *dev); -static int velocity_rx_srv(struct velocity_info *vptr, int status); static int velocity_receive_frame(struct velocity_info *, int idx); static int velocity_alloc_rx_buf(struct velocity_info *, int idx); -static void velocity_init_registers(struct velocity_info *vptr, enum velocity_init_type type); static void velocity_free_rd_ring(struct velocity_info *vptr); static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *); static int velocity_soft_reset(struct velocity_info *vptr); @@ -254,12 +257,8 @@ static void enable_mii_autopoll(struct mac_regs * regs); static int velocity_mii_read(struct mac_regs *, u8 byIdx, u16 * pdata); static int velocity_mii_write(struct mac_regs *, u8 byMiiAddr, u16 data); -static int velocity_set_wol(struct velocity_info *vptr); -static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context); -static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context); static u32 mii_check_media_mode(struct mac_regs * regs); static u32 check_connection_type(struct mac_regs * regs); -static void velocity_init_cam_filter(struct velocity_info *vptr); static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status); #ifdef CONFIG_PM @@ -269,8 +268,9 @@ static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr); static struct notifier_block velocity_inetaddr_notifier = { - notifier_call:velocity_netdev_event, + .notifier_call = velocity_netdev_event, }; +static int velocity_notifier_registered; #endif /* CONFIG_PM */ @@ -289,8 +289,9 @@ */ static struct pci_device_id velocity_id_table[] __devinitdata = { - {0x1106, 0x3119, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &chip_info_table[0]}, - {0,} + {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) chip_info_table}, + {0, } }; MODULE_DEVICE_TABLE(pci, velocity_id_table); @@ -463,6 +464,12 @@ } } +static inline void velocity_give_rx_desc(struct rx_desc *rd) +{ + *(u32 *)&rd->rdesc0 = 0; + rd->rdesc0.owner = cpu_to_le32(OWNED_BY_NIC); +} + /** * velocity_rx_reset - handle a receive reset * @vptr: velocity we are resetting @@ -477,13 +484,13 @@ struct mac_regs * regs = vptr->mac_regs; int i; - vptr->rd_used = vptr->rd_curr = 0; + vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0; /* * Init state, all RD entries belong to the NIC */ for (i = 0; i < vptr->options.numrx; ++i) - 
vptr->rd_ring[i].rdesc0.owner = cpu_to_le32(OWNED_BY_NIC); + velocity_give_rx_desc(vptr->rd_ring + i); writew(vptr->options.numrx, ®s->RBRDU); writel(vptr->rd_pool_dma, ®s->RDBaseLo); @@ -776,6 +783,12 @@ pci_set_power_state(pdev, 3); out: +#ifdef CONFIG_PM + if (ret == 0 && !velocity_notifier_registered) { + velocity_notifier_registered = 1; + register_inetaddr_notifier(&velocity_inetaddr_notifier); + } +#endif return ret; err_iounmap: @@ -966,6 +979,60 @@ pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma); } +static inline void velocity_give_many_rx_descs(struct velocity_info *vptr) +{ + struct mac_regs *regs = vptr->mac_regs; + int avail, dirty, unusable; + + /* + * RD number must be equal to 4X per hardware spec + * (programming guide rev 1.20, p.13) + */ + if (vptr->rd_filled < 4) + return; + + wmb(); + + unusable = vptr->rd_filled | 0x0003; + dirty = vptr->rd_dirty - unusable + 1; + for (avail = vptr->rd_filled & 0xfffc; avail; avail--) { + dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; + velocity_give_rx_desc(vptr->rd_ring + dirty); + } + + writew(vptr->rd_filled & 0xfffc, ®s->RBRDU); + vptr->rd_filled = unusable; +} + +static int velocity_rx_refill(struct velocity_info *vptr) +{ + int dirty = vptr->rd_dirty, done = 0, ret = 0; + + do { + struct rx_desc *rd = vptr->rd_ring + dirty; + + /* Fine for an all zero Rx desc at init time as well */ + if (rd->rdesc0.owner == cpu_to_le32(OWNED_BY_NIC)) + break; + + if (!vptr->rd_info[dirty].skb) { + ret = velocity_alloc_rx_buf(vptr, dirty); + if (ret < 0) + break; + } + done++; + dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; + } while (dirty != vptr->rd_curr); + + if (done) { + vptr->rd_dirty = dirty; + vptr->rd_filled += done; + velocity_give_many_rx_descs(vptr); + } + + return ret; +} + /** * velocity_init_rd_ring - set up receive ring * @vptr: velocity to configure @@ -976,9 +1043,7 @@ static int velocity_init_rd_ring(struct velocity_info *vptr) { - int i, ret = -ENOMEM; - struct rx_desc *rd; - struct velocity_rd_info *rd_info; + int ret = -ENOMEM; unsigned int rsize = sizeof(struct velocity_rd_info) * vptr->options.numrx; @@ -987,22 +1052,14 @@ goto out; memset(vptr->rd_info, 0, rsize); - /* Init the RD ring entries */ - for (i = 0; i < vptr->options.numrx; i++) { - rd = &(vptr->rd_ring[i]); - rd_info = &(vptr->rd_info[i]); + vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0; - ret = velocity_alloc_rx_buf(vptr, i); - if (ret < 0) { - VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR - "%s: failed to allocate RX buffer.\n", - vptr->dev->name); - velocity_free_rd_ring(vptr); - goto out; - } - rd->rdesc0.owner = OWNED_BY_NIC; + ret = velocity_rx_refill(vptr); + if (ret < 0) { + VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR + "%s: failed to allocate RX buffer.\n", vptr->dev->name); + velocity_free_rd_ring(vptr); } - vptr->rd_used = vptr->rd_curr = 0; out: return ret; } @@ -1025,7 +1082,7 @@ for (i = 0; i < vptr->options.numrx; i++) { struct velocity_rd_info *rd_info = &(vptr->rd_info[i]); - if (!rd_info->skb_dma) + if (!rd_info->skb) continue; pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); @@ -1146,22 +1203,14 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status) { - struct rx_desc *rd; struct net_device_stats *stats = &vptr->stats; - struct mac_regs * regs = vptr->mac_regs; int rd_curr = vptr->rd_curr; int works = 0; while (1) { + struct rx_desc *rd = vptr->rd_ring + rd_curr; - rd = &(vptr->rd_ring[rd_curr]); - - if ((vptr->rd_info[rd_curr]).skb == NULL) { - if 
(velocity_alloc_rx_buf(vptr, rd_curr) < 0) - break; - } - - if (works++ > 15) + if (!vptr->rd_info[rd_curr].skb || (works++ > 15)) break; if (rd->rdesc0.owner == OWNED_BY_NIC) @@ -1169,17 +1218,10 @@ /* * Don't drop CE or RL error frame although RXOK is off - * FIXME: need to handle copybreak */ if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) { - if (velocity_receive_frame(vptr, rd_curr) == 0) { - if (velocity_alloc_rx_buf(vptr, rd_curr) < 0) { - VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: can not allocate rx buf\n", vptr->dev->name); - break; - } - } else { + if (velocity_receive_frame(vptr, rd_curr) < 0) stats->rx_dropped++; - } } else { if (rd->rdesc0.RSR & RSR_CRC) stats->rx_crc_errors++; @@ -1191,25 +1233,18 @@ rd->inten = 1; - if (++vptr->rd_used >= 4) { - int i, rd_prev = rd_curr; - for (i = 0; i < 4; i++) { - if (--rd_prev < 0) - rd_prev = vptr->options.numrx - 1; - - rd = &(vptr->rd_ring[rd_prev]); - rd->rdesc0.owner = OWNED_BY_NIC; - } - writew(4, &(regs->RBRDU)); - vptr->rd_used -= 4; - } - vptr->dev->last_rx = jiffies; rd_curr++; if (rd_curr >= vptr->options.numrx) rd_curr = 0; } + + if (velocity_rx_refill(vptr) < 0) { + VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR + "%s: rx buf allocation failure\n", vptr->dev->name); + } + vptr->rd_curr = rd_curr; VAR_USED(stats); return works; @@ -1242,6 +1277,65 @@ } /** + * velocity_rx_copy - in place Rx copy for small packets + * @rx_skb: network layer packet buffer candidate + * @pkt_size: received data size + * @rd: receive packet descriptor + * @dev: network device + * + * Replace the current skb that is scheduled for Rx processing by a + * shorter, immediatly allocated skb, if the received packet is small + * enough. This function returns a negative value if the received + * packet is too big or if memory is exhausted. + */ +static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size, + struct velocity_info *vptr) +{ + int ret = -1; + + if (pkt_size < rx_copybreak) { + struct sk_buff *new_skb; + + new_skb = dev_alloc_skb(pkt_size + 2); + if (new_skb) { + new_skb->dev = vptr->dev; + new_skb->ip_summed = rx_skb[0]->ip_summed; + + if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) + skb_reserve(new_skb, 2); + + memcpy(new_skb->data, rx_skb[0]->tail, pkt_size); + *rx_skb = new_skb; + ret = 0; + } + + } + return ret; +} + +/** + * velocity_iph_realign - IP header alignment + * @vptr: velocity we are handling + * @skb: network layer packet buffer + * @pkt_size: received data size + * + * Align IP header on a 2 bytes boundary. This behavior can be + * configured by the user. + */ +static inline void velocity_iph_realign(struct velocity_info *vptr, + struct sk_buff *skb, int pkt_size) +{ + /* FIXME - memmove ? 
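/*
 * Sketch of the receive hand-off this patch introduces (a fragment,
 * names from the patch). Frames under rx_copybreak are duplicated into
 * a fresh skb so the DMA buffer can stay in the ring; larger frames
 * give up their skb and the slot is refilled later by
 * velocity_rx_refill(), which hands descriptors back to the NIC only
 * in multiples of four per the hardware spec quoted earlier:
 *
 *	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
 *				    vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
 *	pci_action = pci_dma_sync_single_for_device;	// default: keep skb
 *	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
 *		velocity_iph_realign(vptr, skb, pkt_len);
 *		pci_action = pci_unmap_single;	// big frame: hand it up
 *		rd_info->skb = NULL;		// ring slot needs a refill
 *	}
 *	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
 *		   PCI_DMA_FROMDEVICE);
 */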
*/ + if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) { + int i; + + for (i = pkt_size; i >= 0; i--) + *(skb->data + i + 2) = *(skb->data + i); + skb_reserve(skb, 2); + } +} + +/** * velocity_receive_frame - received packet processor * @vptr: velocity we are handling * @idx: ring index @@ -1252,9 +1346,11 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) { + void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); struct net_device_stats *stats = &vptr->stats; struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); struct rx_desc *rd = &(vptr->rd_ring[idx]); + int pkt_len = rd->rdesc0.len; struct sk_buff *skb; if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { @@ -1269,22 +1365,8 @@ skb = rd_info->skb; skb->dev = vptr->dev; - pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, - PCI_DMA_FROMDEVICE); - rd_info->skb_dma = (dma_addr_t) NULL; - rd_info->skb = NULL; - - /* FIXME - memmove ? */ - if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) { - int i; - for (i = rd->rdesc0.len + 4; i >= 0; i--) - *(skb->data + i + 2) = *(skb->data + i); - skb->data += 2; - skb->tail += 2; - } - - skb_put(skb, (rd->rdesc0.len - 4)); - skb->protocol = eth_type_trans(skb, skb->dev); + pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, + vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); /* * Drop frame not meeting IEEE 802.3 @@ -1297,13 +1379,23 @@ } } + pci_action = pci_dma_sync_single_for_device; + velocity_rx_csum(rd, skb); - - /* - * FIXME: need rx_copybreak handling - */ - stats->rx_bytes += skb->len; + if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { + velocity_iph_realign(vptr, skb, pkt_len); + pci_action = pci_unmap_single; + rd_info->skb = NULL; + } + + pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, + PCI_DMA_FROMDEVICE); + + skb_put(skb, pkt_len - 4); + skb->protocol = eth_type_trans(skb, skb->dev); + + stats->rx_bytes += pkt_len; netif_rx(skb); return 0; @@ -1963,32 +2055,6 @@ /** - * ether_crc - ethernet CRC function - * - * Compute an ethernet CRC hash of the data block provided. This - * is not performance optimised but is not needed in performance - * critical code paths. - * - * FIXME: could we use shared code here ? - */ - -static inline u32 ether_crc(int length, unsigned char *data) -{ - static unsigned const ethernet_polynomial = 0x04c11db7U; - - int crc = -1; - - while (--length >= 0) { - unsigned char current_octet = *data++; - int bit; - for (bit = 0; bit < 8; bit++, current_octet >>= 1) { - crc = (crc << 1) ^ ((crc < 0) ^ (current_octet & 1) ? 
ethernet_polynomial : 0); - } - } - return crc; -} - -/** * velocity_set_multi - filter list change callback * @dev: network device * @@ -2123,13 +2189,13 @@ */ static struct pci_driver velocity_driver = { - name:VELOCITY_NAME, - id_table:velocity_id_table, - probe:velocity_found1, - remove:velocity_remove1, + .name = VELOCITY_NAME, + .id_table = velocity_id_table, + .probe = velocity_found1, + .remove = __devexit_p(velocity_remove1), #ifdef CONFIG_PM - suspend:velocity_suspend, - resume:velocity_resume, + .suspend = velocity_suspend, + .resume = velocity_resume, #endif }; @@ -2147,9 +2213,6 @@ int ret; ret = pci_module_init(&velocity_driver); -#ifdef CONFIG_PM - register_inetaddr_notifier(&velocity_inetaddr_notifier); -#endif return ret; } @@ -2165,7 +2228,10 @@ static void __exit velocity_cleanup_module(void) { #ifdef CONFIG_PM - unregister_inetaddr_notifier(&velocity_inetaddr_notifier); + if (velocity_notifier_registered) { + unregister_inetaddr_notifier(&velocity_inetaddr_notifier); + velocity_notifier_registered = 0; + } #endif pci_unregister_driver(&velocity_driver); } @@ -2992,172 +3058,6 @@ } -static int velocity_suspend(struct pci_dev *pdev, u32 state) -{ - struct velocity_info *vptr = pci_get_drvdata(pdev); - unsigned long flags; - - if(!netif_running(vptr->dev)) - return 0; - - netif_device_detach(vptr->dev); - - spin_lock_irqsave(&vptr->lock, flags); - pci_save_state(pdev, vptr->pci_state); -#ifdef ETHTOOL_GWOL - if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { - velocity_get_ip(vptr); - velocity_save_context(vptr, &vptr->context); - velocity_shutdown(vptr); - velocity_set_wol(vptr); - pci_enable_wake(pdev, 3, 1); - pci_set_power_state(pdev, 3); - } else { - velocity_save_context(vptr, &vptr->context); - velocity_shutdown(vptr); - pci_disable_device(pdev); - pci_set_power_state(pdev, state); - } -#else - pci_set_power_state(pdev, state); -#endif - spin_unlock_irqrestore(&vptr->lock, flags); - return 0; -} - -static int velocity_resume(struct pci_dev *pdev) -{ - struct velocity_info *vptr = pci_get_drvdata(pdev); - unsigned long flags; - int i; - - if(!netif_running(vptr->dev)) - return 0; - - pci_set_power_state(pdev, 0); - pci_enable_wake(pdev, 0, 0); - pci_restore_state(pdev, vptr->pci_state); - - mac_wol_reset(vptr->mac_regs); - - spin_lock_irqsave(&vptr->lock, flags); - velocity_restore_context(vptr, &vptr->context); - velocity_init_registers(vptr, VELOCITY_INIT_WOL); - mac_disable_int(vptr->mac_regs); - - velocity_tx_srv(vptr, 0); - - for (i = 0; i < vptr->num_txq; i++) { - if (vptr->td_used[i]) { - mac_tx_queue_wake(vptr->mac_regs, i); - } - } - - mac_enable_int(vptr->mac_regs); - spin_unlock_irqrestore(&vptr->lock, flags); - netif_device_attach(vptr->dev); - - return 0; -} - -static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr) -{ - struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; - struct net_device *dev; - struct velocity_info *vptr; - - if (ifa) { - dev = ifa->ifa_dev->dev; - vptr = dev->priv; - velocity_get_ip(vptr); - } - return NOTIFY_DONE; -} -#endif - -/* - * Purpose: Functions to set WOL. 
- */ - -const static unsigned short crc16_tab[256] = { - 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, - 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, - 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, - 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, - 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, - 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, - 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, - 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, - 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, - 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, - 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, - 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, - 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, - 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, - 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, - 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, - 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, - 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, - 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, - 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, - 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, - 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, - 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, - 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, - 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, - 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, - 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, - 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, - 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, - 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, - 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, - 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 -}; - - -static u32 mask_pattern[2][4] = { - {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */ - {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */ -}; - -/** - * ether_crc16 - compute ethernet CRC - * @len: buffer length - * @cp: buffer - * @crc16: initial CRC - * - * Compute a CRC value for a block of data. - * FIXME: can we use generic functions ? - */ - -static u16 ether_crc16(int len, u8 * cp, u16 crc16) -{ - while (len--) - crc16 = (crc16 >> 8) ^ crc16_tab[(crc16 ^ *cp++) & 0xff]; - return (crc16); -} - -/** - * bit_reverse - 16bit reverse - * @data: 16bit data t reverse - * - * Reverse the order of a 16bit value and return the reversed bits - */ - -static u16 bit_reverse(u16 data) -{ - u32 new = 0x00000000; - int ii; - - - for (ii = 0; ii < 16; ii++) { - new |= ((u32) (data & 1) << (31 - ii)); - data >>= 1; - } - - return (u16) (new >> 16); -} - /** * wol_calc_crc - WOL CRC * @pattern: data pattern @@ -3166,7 +3066,7 @@ * Compute the wake on lan crc hashes for the packet header * we are interested in. 
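/*
 * Illustrative aside (not patch text): the private CRC table and
 * helpers removed above are replaced by generic kernel helpers. The
 * new call sites assume crc16() takes (crc, buffer, len) and that
 * bitreverse() reverses 32 bits, so a 16-bit reversal needs a shift
 * back down:
 *
 *	crc = crc16(crc, &pattern[i * 8 + j], 1);	// was ether_crc16()
 *	return bitreverse(crc) >> 16;			// was bit_reverse()
 */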
*/ - + u16 wol_calc_crc(int size, u8 * pattern, u8 *mask_pattern) { u16 crc = 0xFFFF; @@ -3186,12 +3086,12 @@ continue; } mask >>= 1; - crc = ether_crc16(1, &(pattern[i * 8 + j]), crc); + crc = crc16(crc, &(pattern[i * 8 + j]), 1); } } /* Finally, invert the result once to get the correct data */ crc = ~crc; - return bit_reverse(crc); + return bitreverse(crc) >> 16; } /** @@ -3203,13 +3103,18 @@ * * FIXME: check static buffer is safe here */ - + static int velocity_set_wol(struct velocity_info *vptr) { struct mac_regs * regs = vptr->mac_regs; static u8 buf[256]; int i; + static u32 mask_pattern[2][4] = { + {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */ + {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */ + }; + writew(0xFFFF, ®s->WOLCRClr); writeb(WOLCFG_SAB | WOLCFG_SAM, ®s->WOLCFGSet); writew(WOLCR_MAGIC_EN, ®s->WOLCRSet); @@ -3236,7 +3141,8 @@ memcpy(arp->ar_tip, vptr->ip_addr, 4); - crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf, (u8 *) & mask_pattern[0][0]); + crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf, + (u8 *) & mask_pattern[0][0]); writew(crc, ®s->PatternCRC[0]); writew(WOLCR_ARP_EN, ®s->WOLCRSet); @@ -3275,3 +3181,85 @@ return 0; } +static int velocity_suspend(struct pci_dev *pdev, u32 state) +{ + struct velocity_info *vptr = pci_get_drvdata(pdev); + unsigned long flags; + + if(!netif_running(vptr->dev)) + return 0; + + netif_device_detach(vptr->dev); + + spin_lock_irqsave(&vptr->lock, flags); + pci_save_state(pdev, vptr->pci_state); +#ifdef ETHTOOL_GWOL + if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { + velocity_get_ip(vptr); + velocity_save_context(vptr, &vptr->context); + velocity_shutdown(vptr); + velocity_set_wol(vptr); + pci_enable_wake(pdev, 3, 1); + pci_set_power_state(pdev, 3); + } else { + velocity_save_context(vptr, &vptr->context); + velocity_shutdown(vptr); + pci_disable_device(pdev); + pci_set_power_state(pdev, state); + } +#else + pci_set_power_state(pdev, state); +#endif + spin_unlock_irqrestore(&vptr->lock, flags); + return 0; +} + +static int velocity_resume(struct pci_dev *pdev) +{ + struct velocity_info *vptr = pci_get_drvdata(pdev); + unsigned long flags; + int i; + + if(!netif_running(vptr->dev)) + return 0; + + pci_set_power_state(pdev, 0); + pci_enable_wake(pdev, 0, 0); + pci_restore_state(pdev, vptr->pci_state); + + mac_wol_reset(vptr->mac_regs); + + spin_lock_irqsave(&vptr->lock, flags); + velocity_restore_context(vptr, &vptr->context); + velocity_init_registers(vptr, VELOCITY_INIT_WOL); + mac_disable_int(vptr->mac_regs); + + velocity_tx_srv(vptr, 0); + + for (i = 0; i < vptr->num_txq; i++) { + if (vptr->td_used[i]) { + mac_tx_queue_wake(vptr->mac_regs, i); + } + } + + mac_enable_int(vptr->mac_regs); + spin_unlock_irqrestore(&vptr->lock, flags); + netif_device_attach(vptr->dev); + + return 0; +} + +static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; + struct net_device *dev; + struct velocity_info *vptr; + + if (ifa) { + dev = ifa->ifa_dev->dev; + vptr = dev->priv; + velocity_get_ip(vptr); + } + return NOTIFY_DONE; +} +#endif diff -Nru a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h --- a/drivers/net/via-velocity.h 2004-07-13 12:46:19 -07:00 +++ b/drivers/net/via-velocity.h 2004-07-13 12:46:19 -07:00 @@ -37,7 +37,6 @@ #define OPTION_DEFAULT { [0 ... 
MAX_UNITS-1] = -1} #define REV_ID_VT6110 (0) -#define DEVICE_ID (0x3119) #define BYTE_REG_BITS_ON(x,p) do { writeb(readb((p))|(x),(p));} while (0) #define WORD_REG_BITS_ON(x,p) do { writew(readw((p))|(x),(p));} while (0) @@ -1772,7 +1771,8 @@ struct velocity_td_info *td_infos[TX_QUEUE_NO]; int rd_curr; - int rd_used; + int rd_dirty; + u32 rd_filled; struct rx_desc *rd_ring; struct velocity_rd_info *rd_info; /* It's an array */ diff -Nru a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c --- a/drivers/net/wireless/prism54/oid_mgt.c 2004-07-13 12:46:19 -07:00 +++ b/drivers/net/wireless/prism54/oid_mgt.c 2004-07-13 12:46:19 -07:00 @@ -28,10 +28,6 @@ 2447, 2452, 2457, 2462, 2467, 2472, 2484 }; -const int frequency_list_a[] = { 5170, 5180, 5190, 5200, 5210, 5220, 5230, - 5240, 5260, 5280, 5300, 5320 -}; - int channel_of_freq(int f) { @@ -41,10 +37,8 @@ while ((c < 14) && (f != frequency_list_bg[c])) c++; return (c >= 14) ? 0 : ++c; - } else if ((f >= (int) 5170) && (f <= (int) 5320)) { - while ((c < 12) && (f != frequency_list_a[c])) - c++; - return (c >= 12) ? 0 : (c + 37); + } else if ((f >= (int) 5000) && (f <= (int) 6000)) { + return ( (f - 5000) / 5 ); } else return 0; } diff -Nru a/include/linux/pci_ids.h b/include/linux/pci_ids.h --- a/include/linux/pci_ids.h 2004-07-13 12:46:19 -07:00 +++ b/include/linux/pci_ids.h 2004-07-13 12:46:19 -07:00 @@ -1063,21 +1063,33 @@ #define PCI_DEVICE_ID_NVIDIA_UVTNT2 0x002D #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE 0x0035 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036 +#define PCI_DEVICE_ID_NVIDIA_NVENET_10 0x0037 +#define PCI_DEVICE_ID_NVIDIA_NVENET_11 0x0038 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2 0x003e #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE 0x0053 #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA 0x0054 #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2 0x0055 +#define PCI_DEVICE_ID_NVIDIA_NVENET_8 0x0056 +#define PCI_DEVICE_ID_NVIDIA_NVENET_9 0x0057 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065 +#define PCI_DEVICE_ID_NVIDIA_NVENET_2 0x0066 #define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO 0x006a #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE 0x0085 +#define PCI_DEVICE_ID_NVIDIA_NVENET_4 0x0086 +#define PCI_DEVICE_ID_NVIDIA_NVENET_5 0x008c #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA 0x008e #define PCI_DEVICE_ID_NVIDIA_ITNT2 0x00A0 #define PCI_DEVICE_ID_NVIDIA_NFORCE3 0x00d1 #define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO 0x00da #define PCI_DEVICE_ID_NVIDIA_NFORCE3S 0x00e1 #define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE 0x00d5 +#define PCI_DEVICE_ID_NVIDIA_NVENET_3 0x00d6 +#define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO 0x00da +#define PCI_DEVICE_ID_NVIDIA_NVENET_7 0x00df +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S 0x00e1 #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA 0x00e3 #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE 0x00e5 +#define PCI_DEVICE_ID_NVIDIA_NVENET_6 0x00e6 #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2 0x00ee #define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101 @@ -1105,6 +1117,7 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE 0x01a4 #define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO 0x01b1 #define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE 0x01bc +#define PCI_DEVICE_ID_NVIDIA_NVENET_1 0x01c3 #define PCI_DEVICE_ID_NVIDIA_NFORCE2 0x01e0 #define PCI_DEVICE_ID_NVIDIA_GEFORCE3 0x0200 #define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1 0x0201 @@ -1218,6 +1231,7 @@ #define PCI_DEVICE_ID_VIA_8233C_0 0x3109 #define PCI_DEVICE_ID_VIA_8361 0x3112 #define PCI_DEVICE_ID_VIA_XM266 0x3116 +#define PCI_DEVICE_ID_VIA_612X 0x3119 #define 
PCI_DEVICE_ID_VIA_862X_0 0x3123
#define PCI_DEVICE_ID_VIA_8753_0 0x3128
#define PCI_DEVICE_ID_VIA_8233A 0x3147
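/*
 * Illustrative aside on the prism54 change above (not patch text):
 * 802.11a channels are defined by f = 5000 + 5 * ch MHz, so the lookup
 * table becomes plain arithmetic:
 *
 *	channel = (f - 5000) / 5;	// 5180 -> 36, 5260 -> 52, 5320 -> 64
 *
 * The old code returned (c + 37) for an index into frequency_list_a,
 * which mapped e.g. 5170 MHz to 37 instead of channel 34.
 */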