1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
3 Written 1999-2000 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
25 #define DRV_NAME "sundance"
26 #define DRV_VERSION "1.2"
27 #define DRV_RELDATE "11-Sep-2006"
30 /* The user-configurable values.
31 These may be modified when a driver module is loaded.*/
32 static int debug = 1; /* Debug level: 0 quiet, 1 normal messages, .. 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34 A 64-element hash table based on the Ethernet CRC is typical. */
35 static const int multicast_filter_limit = 32;
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38 Setting to > 1518 effectively disables this feature.
39 This chip can receive into offset buffers, so the Alpha does not
40 need a copy-align. */
41 static int rx_copybreak;
42 static int flowctrl=1;
44 /* media[] specifies the media type the NIC operates at.
45 autosense Autosensing active media.
46 10mbps_hd 10Mbps half duplex.
47 10mbps_fd 10Mbps full duplex.
48 100mbps_hd 100Mbps half duplex.
49 100mbps_fd 100Mbps full duplex.
50 0 Autosensing active media.
51 1 10Mbps half duplex.
52 2 10Mbps full duplex.
53 3 100Mbps half duplex.
54 4 100Mbps full duplex.
57 static char *media[MAX_UNITS];
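/* Example usage (an illustrative sketch, not taken from this file):
 *	modprobe sundance media=100mbps_fd,autosense flowctrl=1
 * assigns the override strings listed above to units in probe order;
 * see the media[] parsing in sundance_probe1() below. */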
60 /* Operational parameters that are set at compile time. */
62 /* Keep the ring sizes a power of two for compile efficiency.
63 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64 Making the Tx ring too large decreases the effectiveness of channel
65 bonding and packet priority, and more than 128 requires modifying the
66 Tx error recovery.
67 Large receive rings merely waste memory. */
68 #define TX_RING_SIZE 32
69 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
70 #define RX_RING_SIZE 64
72 #define TX_TOTAL_SIZE (TX_RING_SIZE * sizeof(struct netdev_desc))
73 #define RX_TOTAL_SIZE (RX_RING_SIZE * sizeof(struct netdev_desc))
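/* A minimal sketch of the power-of-two trick noted above: with
 * TX_RING_SIZE = 32, the compiler reduces the modulus to a mask, so the
 * two expressions below compile to the same code:
 *
 *	entry = np->cur_tx % TX_RING_SIZE;
 *	entry = np->cur_tx & (TX_RING_SIZE - 1);
 */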
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT (4*HZ)
78 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/interrupt.h>
88 #include <linux/pci.h>
89 #include <linux/netdevice.h>
90 #include <linux/etherdevice.h>
91 #include <linux/skbuff.h>
92 #include <linux/init.h>
93 #include <linux/bitops.h>
94 #include <asm/uaccess.h>
95 #include <asm/processor.h> /* Processor type for cache alignment. */
97 #include <linux/delay.h>
98 #include <linux/spinlock.h>
99 #include <linux/dma-mapping.h>
100 #include <linux/crc32.h>
101 #include <linux/ethtool.h>
102 #include <linux/mii.h>
104 /* These identify the driver base version and may not be removed. */
105 static const char version[] =
106 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
107 " Written by Donald Becker\n";
109 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
110 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
111 MODULE_LICENSE("GPL");
113 module_param(debug, int, 0);
114 module_param(rx_copybreak, int, 0);
115 module_param_array(media, charp, NULL, 0);
116 module_param(flowctrl, int, 0);
117 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-7)");
118 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
119 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
124 I. Board Compatibility
126 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
128 II. Board-specific settings
130 III. Driver operation
134 This driver uses two statically allocated fixed-size descriptor lists
135 formed into rings by a branch from the final descriptor to the beginning of
136 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
137 Some chips explicitly use only 2^N sized rings, while others use a
138 'next descriptor' pointer that the driver forms into rings.
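For example, init_ring() below forms the Rx ring with exactly such a
branch: each descriptor's next pointer is (a sketch of the expression
used there)

	np->rx_ring_dma + ((i+1) % RX_RING_SIZE) * sizeof(*np->rx_ring)

so entry RX_RING_SIZE-1 wraps back to entry 0.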
140 IIIb/c. Transmit/Receive Structure
142 This driver uses a zero-copy receive and transmit scheme.
143 The driver allocates full frame size skbuffs for the Rx ring buffers at
144 open() time and passes the skb->data field to the chip as receive data
145 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
146 a fresh skbuff is allocated and the frame is copied to the new skbuff.
147 When the incoming frame is larger, the skbuff is passed directly up the
148 protocol stack. Buffers consumed this way are replaced by newly allocated
149 skbuffs in a later phase of receives.
151 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
152 using a full-sized skbuff for small frames vs. the copying costs of larger
153 frames. New boards are typically used in generously configured machines
154 and the underfilled buffers have negligible impact compared to the benefit of
155 a single allocation size, so the default value of zero results in never
156 copying packets. When copying is done, the cost is usually mitigated by using
157 a combined copy/checksum routine. Copying also preloads the cache, which is
158 most useful with small frames.
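A sketch of the resulting receive-path decision (the names match those
in rx_poll() below):

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL)
		... copy the small frame into a fresh, minimal skb ...
	else
		... pass the full-sized ring skb straight up the stack ...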
160 A subtle aspect of the operation is that the IP header at offset 14 in an
161 ethernet frame isn't longword aligned for further processing.
162 Unaligned buffers are permitted by the Sundance hardware, so
163 frames are received into the skbuff at an offset of "+2", 16-byte aligning
164 the IP header.
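In code terms this is the call made in init_ring() and refill_rx():

	skb_reserve(skb, 2);

since the 14-byte Ethernet header plus the 2-byte pad puts the IP header
at offset 16.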
166 IIId. Synchronization
168 The driver runs as two independent, single-threaded flows of control. One
169 is the send-packet routine, which enforces single-threaded use by the
170 dev->tbusy flag. The other thread is the interrupt handler, which is single
171 threaded by the hardware and interrupt handling software.
173 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
174 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
175 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
176 the 'lp->tx_full' flag.
178 The interrupt handler has exclusive control over the Rx ring and records stats
179 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
180 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
181 clears both the tx_full and tbusy flags.
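In this driver the tbusy/tx_full protocol described above maps onto
netif_stop_queue()/netif_wake_queue() plus a ring occupancy test; a
sketch of the check used in start_tx() and intr_handler():

	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);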
187 The Sundance ST201 datasheet, preliminary version.
188 The Kendin KS8723 datasheet, preliminary version.
189 The ICplus IP100 datasheet, preliminary version.
190 http://www.scyld.com/expert/100mbps.html
191 http://www.scyld.com/expert/NWay.html
197 /* Work-around for Kendin chip bugs. */
198 #ifndef CONFIG_SUNDANCE_MMIO
202 static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
203 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
204 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
205 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
206 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
207 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
208 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
209 { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
212 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
221 static const struct pci_id_info pci_id_tbl[] = {
222 {"D-Link DFE-550TX FAST Ethernet Adapter"},
223 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
224 {"D-Link DFE-580TX 4 port Server Adapter"},
225 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
226 {"D-Link DL10050-based FAST Ethernet Adapter"},
227 {"Sundance Technology Alta"},
228 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
229 { } /* terminate list. */
232 /* This driver was written to use PCI memory space; however, x86-oriented
233 hardware often uses I/O space accesses.
235 /* Offsets to the device registers.
236 Unlike software-only systems, device drivers interact with complex hardware.
237 It's not useful to define symbolic names for every register bit in the
238 device. The name can only partially document the semantics and make
239 the driver longer and more difficult to read.
240 In general, only the important configuration values or bits changed
241 multiple times should be defined symbolically.
246 TxDMABurstThresh = 0x08,
247 TxDMAUrgentThresh = 0x09,
248 TxDMAPollPeriod = 0x0a,
253 RxDMABurstThresh = 0x14,
254 RxDMAUrgentThresh = 0x15,
255 RxDMAPollPeriod = 0x16,
275 MulticastFilter0 = 0x60,
276 MulticastFilter1 = 0x64,
283 StatsCarrierError = 0x74,
284 StatsLateColl = 0x75,
285 StatsMultiColl = 0x76,
289 StatsTxXSDefer = 0x7a,
295 /* Aliased and bogus values! */
299 #define ASIC_HI_WORD(x) ((x) + 2)
301 enum ASICCtrl_HiWord_bit {
302 GlobalReset = 0x0001,
307 NetworkReset = 0x0020,
312 /* Bits in the interrupt status/mask registers. */
313 enum intr_status_bits {
314 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
315 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
317 StatsMax=0x0080, LinkChange=0x0100,
318 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
321 /* Bits in the RxMode register. */
323 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
324 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
326 /* Bits in MACCtrl. */
327 enum mac_ctrl0_bits {
328 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
329 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
331 enum mac_ctrl1_bits {
332 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
333 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
334 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
337 /* Bits in WakeEvent register. */
338 enum wake_event_bits {
339 WakePktEnable = 0x01,
340 MagicPktEnable = 0x02,
341 LinkEventEnable = 0x04,
345 /* The Rx and Tx buffer descriptors. */
346 /* Note that using only 32-bit fields simplifies conversion to big-endian
347 architectures. */
351 struct desc_frag { __le32 addr, length; } frag[1];
354 /* Bits in netdev_desc.status */
355 enum desc_status_bits {
357 DescEndPacket=0x4000,
361 DescIntrOnDMADone=0x80000000,
362 DisableAlign = 0x00000001,
365 #define PRIV_ALIGN 15 /* Required alignment mask */
366 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
367 within the structure. */
369 struct netdev_private {
370 /* Descriptor rings first for alignment. */
371 struct netdev_desc *rx_ring;
372 struct netdev_desc *tx_ring;
373 struct sk_buff* rx_skbuff[RX_RING_SIZE];
374 struct sk_buff* tx_skbuff[TX_RING_SIZE];
375 dma_addr_t tx_ring_dma;
376 dma_addr_t rx_ring_dma;
377 struct timer_list timer; /* Media monitoring timer. */
378 /* ethtool extra stats */
380 u64 tx_multiple_collisions;
381 u64 tx_single_collisions;
382 u64 tx_late_collisions;
384 u64 tx_deferred_excessive;
391 /* Frequently used values: keep some adjacent for cache effect. */
395 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
396 unsigned int rx_buf_sz; /* Based on MTU+slack. */
397 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
398 unsigned int cur_tx, dirty_tx;
399 /* These values keep track of the transceiver/media in use. */
400 unsigned int flowctrl:1;
401 unsigned int default_port:4; /* Last dev->if_port value. */
402 unsigned int an_enable:1;
404 unsigned int wol_enabled:1; /* Wake on LAN enabled */
405 struct tasklet_struct rx_tasklet;
406 struct tasklet_struct tx_tasklet;
409 /* Multicast and receive mode. */
410 spinlock_t mcastlock; /* SMP lock for multicast updates. */
412 /* MII transceiver section. */
413 struct mii_if_info mii_if;
414 int mii_preamble_required;
415 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
416 struct pci_dev *pci_dev;
421 /* The station address location in the EEPROM. */
422 #define EEPROM_SA_OFFSET 0x10
423 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
424 IntrDrvRqst | IntrTxDone | StatsMax | \
427 static int change_mtu(struct net_device *dev, int new_mtu);
428 static int eeprom_read(void __iomem *ioaddr, int location);
429 static int mdio_read(struct net_device *dev, int phy_id, int location);
430 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
431 static int mdio_wait_link(struct net_device *dev, int wait);
432 static int netdev_open(struct net_device *dev);
433 static void check_duplex(struct net_device *dev);
434 static void netdev_timer(unsigned long data);
435 static void tx_timeout(struct net_device *dev);
436 static void init_ring(struct net_device *dev);
437 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
438 static int reset_tx (struct net_device *dev);
439 static irqreturn_t intr_handler(int irq, void *dev_instance);
440 static void rx_poll(unsigned long data);
441 static void tx_poll(unsigned long data);
442 static void refill_rx (struct net_device *dev);
443 static void netdev_error(struct net_device *dev, int intr_status);
445 static void set_rx_mode(struct net_device *dev);
446 static int __set_mac_addr(struct net_device *dev);
447 static int sundance_set_mac_addr(struct net_device *dev, void *data);
448 static struct net_device_stats *get_stats(struct net_device *dev);
449 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
450 static int netdev_close(struct net_device *dev);
451 static const struct ethtool_ops ethtool_ops;
453 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
455 struct netdev_private *np = netdev_priv(dev);
456 void __iomem *ioaddr = np->base + ASICCtrl;
459 /* ST201 documentation states ASICCtrl is a 32-bit register */
460 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
461 /* ST201 documentation states reset can take up to 1 ms */
463 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
464 if (--countdown == 0) {
465 printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
472 static const struct net_device_ops netdev_ops = {
473 .ndo_open = netdev_open,
474 .ndo_stop = netdev_close,
475 .ndo_start_xmit = start_tx,
476 .ndo_get_stats = get_stats,
477 .ndo_set_rx_mode = set_rx_mode,
478 .ndo_do_ioctl = netdev_ioctl,
479 .ndo_tx_timeout = tx_timeout,
480 .ndo_change_mtu = change_mtu,
481 .ndo_set_mac_address = sundance_set_mac_addr,
482 .ndo_validate_addr = eth_validate_addr,
485 static int sundance_probe1(struct pci_dev *pdev,
486 const struct pci_device_id *ent)
488 struct net_device *dev;
489 struct netdev_private *np;
491 int chip_idx = ent->driver_data;
494 void __iomem *ioaddr;
503 int phy, phy_end, phy_idx = 0;
505 /* when built into the kernel, we only print version if device is found */
507 static int printed_version;
508 if (!printed_version++)
512 if (pci_enable_device(pdev))
514 pci_set_master(pdev);
518 dev = alloc_etherdev(sizeof(*np));
521 SET_NETDEV_DEV(dev, &pdev->dev);
523 if (pci_request_regions(pdev, DRV_NAME))
526 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
530 for (i = 0; i < 3; i++)
531 ((__le16 *)dev->dev_addr)[i] =
532 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
533 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
535 np = netdev_priv(dev);
538 np->chip_id = chip_idx;
539 np->msg_enable = (1 << debug) - 1;
540 spin_lock_init(&np->lock);
541 spin_lock_init(&np->statlock);
542 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
543 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
545 ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
546 &ring_dma, GFP_KERNEL);
548 goto err_out_cleardev;
549 np->tx_ring = (struct netdev_desc *)ring_space;
550 np->tx_ring_dma = ring_dma;
552 ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
553 &ring_dma, GFP_KERNEL);
555 goto err_out_unmap_tx;
556 np->rx_ring = (struct netdev_desc *)ring_space;
557 np->rx_ring_dma = ring_dma;
559 np->mii_if.dev = dev;
560 np->mii_if.mdio_read = mdio_read;
561 np->mii_if.mdio_write = mdio_write;
562 np->mii_if.phy_id_mask = 0x1f;
563 np->mii_if.reg_num_mask = 0x1f;
565 /* The chip-specific entries in the device structure. */
566 dev->netdev_ops = &netdev_ops;
567 SET_ETHTOOL_OPS(dev, &ethtool_ops);
568 dev->watchdog_timeo = TX_TIMEOUT;
570 pci_set_drvdata(pdev, dev);
572 i = register_netdev(dev);
574 goto err_out_unmap_rx;
576 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
577 dev->name, pci_id_tbl[chip_idx].name, ioaddr,
580 np->phys[0] = 1; /* Default setting */
581 np->mii_preamble_required++;
584 * It seems some PHYs don't deal well with address 0 being accessed
587 if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
592 phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
594 for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
595 int phyx = phy & 0x1f;
596 int mii_status = mdio_read(dev, phyx, MII_BMSR);
597 if (mii_status != 0xffff && mii_status != 0x0000) {
598 np->phys[phy_idx++] = phyx;
599 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
600 if ((mii_status & 0x0040) == 0)
601 np->mii_preamble_required++;
602 printk(KERN_INFO "%s: MII PHY found at address %d, status "
603 "0x%4.4x advertising %4.4x.\n",
604 dev->name, phyx, mii_status, np->mii_if.advertising);
607 np->mii_preamble_required--;
610 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
611 dev->name, ioread32(ioaddr + ASICCtrl));
612 goto err_out_unregister;
615 np->mii_if.phy_id = np->phys[0];
617 /* Parse override configuration */
619 if (card_idx < MAX_UNITS) {
620 if (media[card_idx] != NULL) {
622 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
623 strcmp (media[card_idx], "4") == 0) {
625 np->mii_if.full_duplex = 1;
626 } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
627 strcmp (media[card_idx], "3") == 0) {
629 np->mii_if.full_duplex = 0;
630 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
631 strcmp (media[card_idx], "2") == 0) {
633 np->mii_if.full_duplex = 1;
634 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
635 strcmp (media[card_idx], "1") == 0) {
637 np->mii_if.full_duplex = 0;
647 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
648 /* Default 100Mbps Full */
651 np->mii_if.full_duplex = 1;
656 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
658 /* If flow control is enabled, we need to advertise it. */
660 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
661 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
662 /* Force media type */
663 if (!np->an_enable) {
665 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
666 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
667 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
668 printk (KERN_INFO "Override speed=%d, %s duplex\n",
669 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
673 /* Perhaps move the reset here? */
674 /* Reset the chip to erase previous misconfiguration. */
675 if (netif_msg_hw(np))
676 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
677 sundance_reset(dev, 0x00ff << 16);
678 if (netif_msg_hw(np))
679 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
685 unregister_netdev(dev);
687 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
688 np->rx_ring, np->rx_ring_dma);
690 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
691 np->tx_ring, np->tx_ring_dma);
693 pci_set_drvdata(pdev, NULL);
694 pci_iounmap(pdev, ioaddr);
696 pci_release_regions(pdev);
702 static int change_mtu(struct net_device *dev, int new_mtu)
704 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
706 if (netif_running(dev))
712 #define eeprom_delay(ee_addr) ioread32(ee_addr)
713 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
714 static int eeprom_read(void __iomem *ioaddr, int location)
716 int boguscnt = 10000; /* Typical 1900 ticks. */
717 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
719 eeprom_delay(ioaddr + EECtrl);
720 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
721 return ioread16(ioaddr + EEData);
723 } while (--boguscnt > 0);
727 /* MII transceiver control section.
728 Read and write the MII registers using software-generated serial
729 MDIO protocol. See the MII specifications or DP83840A data sheet
730 for details.
732 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
733 met by back-to-back 33 MHz PCI cycles. */
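/* A sketch of the clause-22 read frame that mdio_read() shifts out below:
 * after the 32-bit all-ones preamble, the 16 command bits
 * (0xf6 << 10 | phy_id << 5 | location) decode, MSB first, as
 *
 *	11 01 10 PPPPP RRRRR
 *
 * i.e. two further preamble ones, start (01), read opcode (10), then the
 * 5-bit PHY address and the 5-bit register address. */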
734 #define mdio_delay() ioread8(mdio_addr)
737 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
739 #define MDIO_EnbIn (0)
740 #define MDIO_WRITE0 (MDIO_EnbOutput)
741 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
743 /* Generate the preamble required for initial synchronization and
744 a few older transceivers. */
745 static void mdio_sync(void __iomem *mdio_addr)
749 /* Establish sync by sending at least 32 logic ones. */
750 while (--bits >= 0) {
751 iowrite8(MDIO_WRITE1, mdio_addr);
753 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
758 static int mdio_read(struct net_device *dev, int phy_id, int location)
760 struct netdev_private *np = netdev_priv(dev);
761 void __iomem *mdio_addr = np->base + MIICtrl;
762 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
765 if (np->mii_preamble_required)
766 mdio_sync(mdio_addr);
768 /* Shift the read command bits out. */
769 for (i = 15; i >= 0; i--) {
770 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
772 iowrite8(dataval, mdio_addr);
774 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
777 /* Read the two transition, 16 data, and wire-idle bits. */
778 for (i = 19; i > 0; i--) {
779 iowrite8(MDIO_EnbIn, mdio_addr);
781 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
782 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
785 return (retval>>1) & 0xffff;
788 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
790 struct netdev_private *np = netdev_priv(dev);
791 void __iomem *mdio_addr = np->base + MIICtrl;
792 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
795 if (np->mii_preamble_required)
796 mdio_sync(mdio_addr);
798 /* Shift the command bits out. */
799 for (i = 31; i >= 0; i--) {
800 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
802 iowrite8(dataval, mdio_addr);
804 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
807 /* Clear out extra bits. */
808 for (i = 2; i > 0; i--) {
809 iowrite8(MDIO_EnbIn, mdio_addr);
811 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
816 static int mdio_wait_link(struct net_device *dev, int wait)
820 struct netdev_private *np;
822 np = netdev_priv(dev);
823 phy_id = np->phys[0];
826 bmsr = mdio_read(dev, phy_id, MII_BMSR);
830 } while (--wait > 0);
834 static int netdev_open(struct net_device *dev)
836 struct netdev_private *np = netdev_priv(dev);
837 void __iomem *ioaddr = np->base;
838 const int irq = np->pci_dev->irq;
842 sundance_reset(dev, 0x00ff << 16);
844 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
848 if (netif_msg_ifup(np))
849 printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
853 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
854 /* The Tx list pointer is written as packets are queued. */
856 /* Initialize other registers. */
858 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
859 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
861 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
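/* Worked numbers for the two writes above (a sketch): dev->mtu + 14
 * covers the 6+6+2 byte Ethernet header, and the 802.1Q case adds 4
 * bytes (+18) of room for a VLAN tag. */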
864 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
866 /* Configure the PCI bus bursts and FIFO thresholds. */
868 if (dev->if_port == 0)
869 dev->if_port = np->default_port;
871 spin_lock_init(&np->mcastlock);
874 iowrite16(0, ioaddr + IntrEnable);
875 iowrite16(0, ioaddr + DownCounter);
876 /* Set the chip to poll every N*320nsec. */
877 iowrite8(100, ioaddr + RxDMAPollPeriod);
878 iowrite8(127, ioaddr + TxDMAPollPeriod);
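/* e.g. at the N*320 ns granularity noted above, 100 * 320 ns = 32 us
 * between Rx DMA polls and 127 * 320 ns ~= 40.6 us for Tx. */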
879 /* Fix DFE-580TX packet drop issue */
880 if (np->pci_dev->revision >= 0x14)
881 iowrite8(0x01, ioaddr + DebugCtrl1);
882 netif_start_queue(dev);
884 spin_lock_irqsave(&np->lock, flags);
886 spin_unlock_irqrestore(&np->lock, flags);
888 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
891 iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
894 if (netif_msg_ifup(np))
895 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
896 "MAC Control %x, %4.4x %4.4x.\n",
897 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
898 ioread32(ioaddr + MACCtrl0),
899 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
901 /* Set the timer to check for link beat. */
902 init_timer(&np->timer);
903 np->timer.expires = jiffies + 3*HZ;
904 np->timer.data = (unsigned long)dev;
905 np->timer.function = netdev_timer; /* timer handler */
906 add_timer(&np->timer);
908 /* Enable interrupts by setting the interrupt mask. */
909 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
914 static void check_duplex(struct net_device *dev)
916 struct netdev_private *np = netdev_priv(dev);
917 void __iomem *ioaddr = np->base;
918 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
919 int negotiated = mii_lpa & np->mii_if.advertising;
923 if (!np->an_enable || mii_lpa == 0xffff) {
924 if (np->mii_if.full_duplex)
925 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
930 /* Autonegotiation */
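/* A sketch of the resolution rule coded below: 'negotiated' is
 * LPA & advertising; duplex resolves full if 100BASE-TX FD (0x0100)
 * was agreed, or if 10BASE-T FD (0x0040) was agreed with neither
 * 100 Mbps bit of the 0x01C0 mask set. */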
931 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
932 if (np->mii_if.full_duplex != duplex) {
933 np->mii_if.full_duplex = duplex;
934 if (netif_msg_link(np))
935 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
936 "negotiated capability %4.4x.\n", dev->name,
937 duplex ? "full" : "half", np->phys[0], negotiated);
938 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
942 static void netdev_timer(unsigned long data)
944 struct net_device *dev = (struct net_device *)data;
945 struct netdev_private *np = netdev_priv(dev);
946 void __iomem *ioaddr = np->base;
947 int next_tick = 10*HZ;
949 if (netif_msg_timer(np)) {
950 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
952 dev->name, ioread16(ioaddr + IntrEnable),
953 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
956 np->timer.expires = jiffies + next_tick;
957 add_timer(&np->timer);
960 static void tx_timeout(struct net_device *dev)
962 struct netdev_private *np = netdev_priv(dev);
963 void __iomem *ioaddr = np->base;
966 netif_stop_queue(dev);
967 tasklet_disable(&np->tx_tasklet);
968 iowrite16(0, ioaddr + IntrEnable);
969 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
971 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
972 ioread8(ioaddr + TxFrameId));
976 for (i=0; i<TX_RING_SIZE; i++) {
977 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
978 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
979 le32_to_cpu(np->tx_ring[i].next_desc),
980 le32_to_cpu(np->tx_ring[i].status),
981 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
982 le32_to_cpu(np->tx_ring[i].frag[0].addr),
983 le32_to_cpu(np->tx_ring[i].frag[0].length));
985 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
986 ioread32(np->base + TxListPtr),
987 netif_queue_stopped(dev));
988 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
989 np->cur_tx, np->cur_tx % TX_RING_SIZE,
990 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
991 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
992 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
994 spin_lock_irqsave(&np->lock, flag);
998 /* Stop and restart the chip's Tx processes. */
998 spin_unlock_irqrestore(&np->lock, flag);
1002 dev->trans_start = jiffies; /* prevent tx timeout */
1003 dev->stats.tx_errors++;
1004 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1005 netif_wake_queue(dev);
1007 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1008 tasklet_enable(&np->tx_tasklet);
1012 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1013 static void init_ring(struct net_device *dev)
1015 struct netdev_private *np = netdev_priv(dev);
1018 np->cur_rx = np->cur_tx = 0;
1019 np->dirty_rx = np->dirty_tx = 0;
1022 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1024 /* Initialize all Rx descriptors. */
1025 for (i = 0; i < RX_RING_SIZE; i++) {
1026 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1027 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1028 np->rx_ring[i].status = 0;
1029 np->rx_ring[i].frag[0].length = 0;
1030 np->rx_skbuff[i] = NULL;
1033 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1034 for (i = 0; i < RX_RING_SIZE; i++) {
1035 struct sk_buff *skb =
1036 netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1037 np->rx_skbuff[i] = skb;
1040 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1041 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1042 dma_map_single(&np->pci_dev->dev, skb->data,
1043 np->rx_buf_sz, DMA_FROM_DEVICE));
1044 if (dma_mapping_error(&np->pci_dev->dev,
1045 np->rx_ring[i].frag[0].addr)) {
1047 np->rx_skbuff[i] = NULL;
1050 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1052 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1054 for (i = 0; i < TX_RING_SIZE; i++) {
1055 np->tx_skbuff[i] = NULL;
1056 np->tx_ring[i].status = 0;
1060 static void tx_poll (unsigned long data)
1062 struct net_device *dev = (struct net_device *)data;
1063 struct netdev_private *np = netdev_priv(dev);
1064 unsigned head = np->cur_task % TX_RING_SIZE;
1065 struct netdev_desc *txdesc =
1066 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1068 /* Chain the next pointer */
1069 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1070 int entry = np->cur_task % TX_RING_SIZE;
1071 txdesc = &np->tx_ring[entry];
1073 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1074 entry*sizeof(struct netdev_desc));
1076 np->last_tx = txdesc;
1078 /* Mark the latest descriptor in the Tx ring */
1079 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1081 if (ioread32 (np->base + TxListPtr) == 0)
1082 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1083 np->base + TxListPtr);
1087 start_tx (struct sk_buff *skb, struct net_device *dev)
1089 struct netdev_private *np = netdev_priv(dev);
1090 struct netdev_desc *txdesc;
1093 /* Calculate the next Tx descriptor entry. */
1094 entry = np->cur_tx % TX_RING_SIZE;
1095 np->tx_skbuff[entry] = skb;
1096 txdesc = &np->tx_ring[entry];
1098 txdesc->next_desc = 0;
1099 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1100 txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1101 skb->data, skb->len, DMA_TO_DEVICE));
1102 if (dma_mapping_error(&np->pci_dev->dev,
1103 txdesc->frag[0].addr))
1105 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1107 /* Increment cur_tx before tasklet_schedule() */
1110 /* Schedule a tx_poll() task */
1111 tasklet_schedule(&np->tx_tasklet);
1113 /* On some architectures: explicitly flush cache lines here. */
1114 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1115 !netif_queue_stopped(dev)) {
1118 netif_stop_queue (dev);
1120 if (netif_msg_tx_queued(np)) {
1122 "%s: Transmit frame #%d queued in slot %d.\n",
1123 dev->name, np->cur_tx, entry);
1125 return NETDEV_TX_OK;
1129 np->tx_skbuff[entry] = NULL;
1130 dev->stats.tx_dropped++;
1131 return NETDEV_TX_OK;
1134 /* Reset the hardware Tx path and free all Tx buffers */
1136 reset_tx (struct net_device *dev)
1138 struct netdev_private *np = netdev_priv(dev);
1139 void __iomem *ioaddr = np->base;
1140 struct sk_buff *skb;
1143 /* Reset tx logic, TxListPtr will be cleared */
1144 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1145 sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1147 /* free all tx skbuffs */
1148 for (i = 0; i < TX_RING_SIZE; i++) {
1149 np->tx_ring[i].next_desc = 0;
1151 skb = np->tx_skbuff[i];
1153 dma_unmap_single(&np->pci_dev->dev,
1154 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1155 skb->len, DMA_TO_DEVICE);
1156 dev_kfree_skb_any(skb);
1157 np->tx_skbuff[i] = NULL;
1158 dev->stats.tx_dropped++;
1161 np->cur_tx = np->dirty_tx = 0;
1165 iowrite8(127, ioaddr + TxDMAPollPeriod);
1167 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1171 /* The interrupt handler cleans up after the Tx thread,
1172 and schedules the Rx work. */
1173 static irqreturn_t intr_handler(int irq, void *dev_instance)
1175 struct net_device *dev = (struct net_device *)dev_instance;
1176 struct netdev_private *np = netdev_priv(dev);
1177 void __iomem *ioaddr = np->base;
1186 int intr_status = ioread16(ioaddr + IntrStatus);
1187 iowrite16(intr_status, ioaddr + IntrStatus);
1189 if (netif_msg_intr(np))
1190 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1191 dev->name, intr_status);
1193 if (!(intr_status & DEFAULT_INTR))
1198 if (intr_status & (IntrRxDMADone)) {
1199 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1200 ioaddr + IntrEnable);
1202 np->budget = RX_BUDGET;
1203 tasklet_schedule(&np->rx_tasklet);
1205 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1206 tx_status = ioread16 (ioaddr + TxStatus);
1207 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1208 if (netif_msg_tx_done(np))
1210 ("%s: Transmit status is %2.2x.\n",
1211 dev->name, tx_status);
1212 if (tx_status & 0x1e) {
1213 if (netif_msg_tx_err(np))
1214 printk("%s: Transmit error status %4.4x.\n",
1215 dev->name, tx_status);
1216 dev->stats.tx_errors++;
1217 if (tx_status & 0x10)
1218 dev->stats.tx_fifo_errors++;
1219 if (tx_status & 0x08)
1220 dev->stats.collisions++;
1221 if (tx_status & 0x04)
1222 dev->stats.tx_fifo_errors++;
1223 if (tx_status & 0x02)
1224 dev->stats.tx_window_errors++;
1227 ** This reset has been verified on
1228 ** DFE-580TX boards! phdm@macqel.be.
1230 if (tx_status & 0x10) { /* TxUnderrun */
1231 /* Restart Tx FIFO and transmitter */
1232 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1233 /* No need to reset the Tx pointer here */
1235 /* Restart the Tx. Need to make sure Tx is enabled */
1238 iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1239 if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1244 /* Yup, this is a documentation bug. It cost me *hours*. */
1245 iowrite16 (0, ioaddr + TxStatus);
1247 iowrite32(5000, ioaddr + DownCounter);
1250 tx_status = ioread16 (ioaddr + TxStatus);
1252 hw_frame_id = (tx_status >> 8) & 0xff;
1254 hw_frame_id = ioread8(ioaddr + TxFrameId);
1257 if (np->pci_dev->revision >= 0x14) {
1258 spin_lock(&np->lock);
1259 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1260 int entry = np->dirty_tx % TX_RING_SIZE;
1261 struct sk_buff *skb;
1263 sw_frame_id = (le32_to_cpu(
1264 np->tx_ring[entry].status) >> 2) & 0xff;
1265 if (sw_frame_id == hw_frame_id &&
1266 !(le32_to_cpu(np->tx_ring[entry].status)
1269 if (sw_frame_id == (hw_frame_id + 1) %
1272 skb = np->tx_skbuff[entry];
1273 /* Free the original skb. */
1274 dma_unmap_single(&np->pci_dev->dev,
1275 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1276 skb->len, DMA_TO_DEVICE);
1277 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1278 np->tx_skbuff[entry] = NULL;
1279 np->tx_ring[entry].frag[0].addr = 0;
1280 np->tx_ring[entry].frag[0].length = 0;
1282 spin_unlock(&np->lock);
1284 spin_lock(&np->lock);
1285 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1286 int entry = np->dirty_tx % TX_RING_SIZE;
1287 struct sk_buff *skb;
1288 if (!(le32_to_cpu(np->tx_ring[entry].status)
1291 skb = np->tx_skbuff[entry];
1292 /* Free the original skb. */
1293 dma_unmap_single(&np->pci_dev->dev,
1294 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1295 skb->len, DMA_TO_DEVICE);
1296 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1297 np->tx_skbuff[entry] = NULL;
1298 np->tx_ring[entry].frag[0].addr = 0;
1299 np->tx_ring[entry].frag[0].length = 0;
1301 spin_unlock(&np->lock);
1304 if (netif_queue_stopped(dev) &&
1305 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1306 /* The ring is no longer full, clear busy flag. */
1307 netif_wake_queue (dev);
1309 /* Abnormal error summary/uncommon events handlers. */
1310 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1311 netdev_error(dev, intr_status);
1313 if (netif_msg_intr(np))
1314 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1315 dev->name, ioread16(ioaddr + IntrStatus));
1316 return IRQ_RETVAL(handled);
1319 static void rx_poll(unsigned long data)
1321 struct net_device *dev = (struct net_device *)data;
1322 struct netdev_private *np = netdev_priv(dev);
1323 int entry = np->cur_rx % RX_RING_SIZE;
1324 int boguscnt = np->budget;
1325 void __iomem *ioaddr = np->base;
1328 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1330 struct netdev_desc *desc = &(np->rx_ring[entry]);
1331 u32 frame_status = le32_to_cpu(desc->status);
1334 if (--boguscnt < 0) {
1337 if (!(frame_status & DescOwn))
1339 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1340 if (netif_msg_rx_status(np))
1341 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1343 if (frame_status & 0x001f4000) {
1344 /* There was an error. */
1345 if (netif_msg_rx_err(np))
1346 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1348 dev->stats.rx_errors++;
1349 if (frame_status & 0x00100000)
1350 dev->stats.rx_length_errors++;
1351 if (frame_status & 0x00010000)
1352 dev->stats.rx_fifo_errors++;
1353 if (frame_status & 0x00060000)
1354 dev->stats.rx_frame_errors++;
1355 if (frame_status & 0x00080000)
1356 dev->stats.rx_crc_errors++;
1357 if (frame_status & 0x00100000) {
1358 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1360 dev->name, frame_status);
1363 struct sk_buff *skb;
1364 #ifndef final_version
1365 if (netif_msg_rx_status(np))
1366 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1367 ", bogus_cnt %d.\n",
1370 /* Check if the packet is long enough to accept without copying
1371 to a minimally-sized skbuff. */
1372 if (pkt_len < rx_copybreak &&
1373 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1374 skb_reserve(skb, 2); /* 16 byte align the IP header */
1375 dma_sync_single_for_cpu(&np->pci_dev->dev,
1376 le32_to_cpu(desc->frag[0].addr),
1377 np->rx_buf_sz, DMA_FROM_DEVICE);
1378 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1379 dma_sync_single_for_device(&np->pci_dev->dev,
1380 le32_to_cpu(desc->frag[0].addr),
1381 np->rx_buf_sz, DMA_FROM_DEVICE);
1382 skb_put(skb, pkt_len);
1384 dma_unmap_single(&np->pci_dev->dev,
1385 le32_to_cpu(desc->frag[0].addr),
1386 np->rx_buf_sz, DMA_FROM_DEVICE);
1387 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1388 np->rx_skbuff[entry] = NULL;
1390 skb->protocol = eth_type_trans(skb, dev);
1391 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1394 entry = (entry + 1) % RX_RING_SIZE;
1399 np->budget -= received;
1400 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1408 np->budget -= received;
1409 if (np->budget <= 0)
1410 np->budget = RX_BUDGET;
1411 tasklet_schedule(&np->rx_tasklet);
1414 static void refill_rx (struct net_device *dev)
1416 struct netdev_private *np = netdev_priv(dev);
1420 /* Refill the Rx ring buffers. */
1421 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1422 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1423 struct sk_buff *skb;
1424 entry = np->dirty_rx % RX_RING_SIZE;
1425 if (np->rx_skbuff[entry] == NULL) {
1426 skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1427 np->rx_skbuff[entry] = skb;
1429 break; /* Better luck next round. */
1430 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1431 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1432 dma_map_single(&np->pci_dev->dev, skb->data,
1433 np->rx_buf_sz, DMA_FROM_DEVICE));
1434 if (dma_mapping_error(&np->pci_dev->dev,
1435 np->rx_ring[entry].frag[0].addr)) {
1436 dev_kfree_skb_irq(skb);
1437 np->rx_skbuff[entry] = NULL;
1441 /* Perhaps we need not reset this field. */
1442 np->rx_ring[entry].frag[0].length =
1443 cpu_to_le32(np->rx_buf_sz | LastFrag);
1444 np->rx_ring[entry].status = 0;
1448 static void netdev_error(struct net_device *dev, int intr_status)
1450 struct netdev_private *np = netdev_priv(dev);
1451 void __iomem *ioaddr = np->base;
1452 u16 mii_ctl, mii_advertise, mii_lpa;
1455 if (intr_status & LinkChange) {
1456 if (mdio_wait_link(dev, 10) == 0) {
1457 printk(KERN_INFO "%s: Link up\n", dev->name);
1458 if (np->an_enable) {
1459 mii_advertise = mdio_read(dev, np->phys[0],
1461 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1462 mii_advertise &= mii_lpa;
1463 printk(KERN_INFO "%s: Link changed: ",
1465 if (mii_advertise & ADVERTISE_100FULL) {
1467 printk("100Mbps, full duplex\n");
1468 } else if (mii_advertise & ADVERTISE_100HALF) {
1470 printk("100Mbps, half duplex\n");
1471 } else if (mii_advertise & ADVERTISE_10FULL) {
1473 printk("10Mbps, full duplex\n");
1474 } else if (mii_advertise & ADVERTISE_10HALF) {
1476 printk("10Mbps, half duplex\n");
1481 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1482 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1484 printk(KERN_INFO "%s: Link changed: %dMbps, ",
1486 printk("%s duplex.\n",
1487 (mii_ctl & BMCR_FULLDPLX) ?
1491 if (np->flowctrl && np->mii_if.full_duplex) {
1492 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1493 ioaddr + MulticastFilter1+2);
1494 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1497 netif_carrier_on(dev);
1499 printk(KERN_INFO "%s: Link down\n", dev->name);
1500 netif_carrier_off(dev);
1503 if (intr_status & StatsMax) {
1506 if (intr_status & IntrPCIErr) {
1507 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1508 dev->name, intr_status);
1509 /* We must do a global reset of DMA to continue. */
1513 static struct net_device_stats *get_stats(struct net_device *dev)
1515 struct netdev_private *np = netdev_priv(dev);
1516 void __iomem *ioaddr = np->base;
1517 unsigned long flags;
1518 u8 late_coll, single_coll, mult_coll;
1520 spin_lock_irqsave(&np->statlock, flags);
1521 /* The chip only needs to report frames silently dropped. */
1522 dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1523 dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1524 dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1525 dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1527 mult_coll = ioread8(ioaddr + StatsMultiColl);
1528 np->xstats.tx_multiple_collisions += mult_coll;
1529 single_coll = ioread8(ioaddr + StatsOneColl);
1530 np->xstats.tx_single_collisions += single_coll;
1531 late_coll = ioread8(ioaddr + StatsLateColl);
1532 np->xstats.tx_late_collisions += late_coll;
1533 dev->stats.collisions += mult_coll
1534 + single_coll
1535 + late_coll;
1537 np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1538 np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1539 np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1540 np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1541 np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1542 np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1543 np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1545 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1546 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1547 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1548 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1550 spin_unlock_irqrestore(&np->statlock, flags);
1555 static void set_rx_mode(struct net_device *dev)
1557 struct netdev_private *np = netdev_priv(dev);
1558 void __iomem *ioaddr = np->base;
1559 u16 mc_filter[4]; /* Multicast hash filter */
1563 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1564 memset(mc_filter, 0xff, sizeof(mc_filter));
1565 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1566 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1567 (dev->flags & IFF_ALLMULTI)) {
1568 /* Too many to match, or accept all multicasts. */
1569 memset(mc_filter, 0xff, sizeof(mc_filter));
1570 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1571 } else if (!netdev_mc_empty(dev)) {
1572 struct netdev_hw_addr *ha;
1576 memset (mc_filter, 0, sizeof (mc_filter));
1577 netdev_for_each_mc_addr(ha, dev) {
1578 crc = ether_crc_le(ETH_ALEN, ha->addr);
1579 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1580 if (crc & 0x80000000) index |= 1 << bit;
1581 mc_filter[index/16] |= (1 << (index % 16));
1583 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1585 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1588 if (np->mii_if.full_duplex && np->flowctrl)
1589 mc_filter[3] |= 0x0200;
1591 for (i = 0; i < 4; i++)
1592 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1593 iowrite8(rx_mode, ioaddr + RxMode);
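/* Worked example of the hash above (a sketch): ether_crc_le() yields a
 * 32-bit CRC of the address; the inner loop takes its top 6 bits,
 * bit-reversed, as an index 0..63 into the 64-bit filter, which is
 * stored as four 16-bit words: word = index / 16, bit = index % 16. */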
1596 static int __set_mac_addr(struct net_device *dev)
1598 struct netdev_private *np = netdev_priv(dev);
1601 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1602 iowrite16(addr16, np->base + StationAddr);
1603 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1604 iowrite16(addr16, np->base + StationAddr+2);
1605 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1606 iowrite16(addr16, np->base + StationAddr+4);
1610 /* Invoked with rtnl_lock held */
1611 static int sundance_set_mac_addr(struct net_device *dev, void *data)
1613 const struct sockaddr *addr = data;
1615 if (!is_valid_ether_addr(addr->sa_data))
1616 return -EADDRNOTAVAIL;
1617 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1618 __set_mac_addr(dev);
1623 static const struct {
1624 const char name[ETH_GSTRING_LEN];
1625 } sundance_stats[] = {
1626 { "tx_multiple_collisions" },
1627 { "tx_single_collisions" },
1628 { "tx_late_collisions" },
1630 { "tx_deferred_excessive" },
1638 static int check_if_running(struct net_device *dev)
1640 if (!netif_running(dev))
1645 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1647 struct netdev_private *np = netdev_priv(dev);
1648 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1649 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1650 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1653 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1655 struct netdev_private *np = netdev_priv(dev);
1656 spin_lock_irq(&np->lock);
1657 mii_ethtool_gset(&np->mii_if, ecmd);
1658 spin_unlock_irq(&np->lock);
1662 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1664 struct netdev_private *np = netdev_priv(dev);
1666 spin_lock_irq(&np->lock);
1667 res = mii_ethtool_sset(&np->mii_if, ecmd);
1668 spin_unlock_irq(&np->lock);
1672 static int nway_reset(struct net_device *dev)
1674 struct netdev_private *np = netdev_priv(dev);
1675 return mii_nway_restart(&np->mii_if);
1678 static u32 get_link(struct net_device *dev)
1680 struct netdev_private *np = netdev_priv(dev);
1681 return mii_link_ok(&np->mii_if);
1684 static u32 get_msglevel(struct net_device *dev)
1686 struct netdev_private *np = netdev_priv(dev);
1687 return np->msg_enable;
1690 static void set_msglevel(struct net_device *dev, u32 val)
1692 struct netdev_private *np = netdev_priv(dev);
1693 np->msg_enable = val;
1696 static void get_strings(struct net_device *dev, u32 stringset,
1699 if (stringset == ETH_SS_STATS)
1700 memcpy(data, sundance_stats, sizeof(sundance_stats));
1703 static int get_sset_count(struct net_device *dev, int sset)
1707 return ARRAY_SIZE(sundance_stats);
1713 static void get_ethtool_stats(struct net_device *dev,
1714 struct ethtool_stats *stats, u64 *data)
1716 struct netdev_private *np = netdev_priv(dev);
1720 data[i++] = np->xstats.tx_multiple_collisions;
1721 data[i++] = np->xstats.tx_single_collisions;
1722 data[i++] = np->xstats.tx_late_collisions;
1723 data[i++] = np->xstats.tx_deferred;
1724 data[i++] = np->xstats.tx_deferred_excessive;
1725 data[i++] = np->xstats.tx_aborted;
1726 data[i++] = np->xstats.tx_bcasts;
1727 data[i++] = np->xstats.rx_bcasts;
1728 data[i++] = np->xstats.tx_mcasts;
1729 data[i++] = np->xstats.rx_mcasts;
1734 static void sundance_get_wol(struct net_device *dev,
1735 struct ethtool_wolinfo *wol)
1737 struct netdev_private *np = netdev_priv(dev);
1738 void __iomem *ioaddr = np->base;
1743 wol->supported = (WAKE_PHY | WAKE_MAGIC);
1744 if (!np->wol_enabled)
1747 wol_bits = ioread8(ioaddr + WakeEvent);
1748 if (wol_bits & MagicPktEnable)
1749 wol->wolopts |= WAKE_MAGIC;
1750 if (wol_bits & LinkEventEnable)
1751 wol->wolopts |= WAKE_PHY;
1754 static int sundance_set_wol(struct net_device *dev,
1755 struct ethtool_wolinfo *wol)
1757 struct netdev_private *np = netdev_priv(dev);
1758 void __iomem *ioaddr = np->base;
1761 if (!device_can_wakeup(&np->pci_dev->dev))
1764 np->wol_enabled = !!(wol->wolopts);
1765 wol_bits = ioread8(ioaddr + WakeEvent);
1766 wol_bits &= ~(WakePktEnable | MagicPktEnable |
1767 LinkEventEnable | WolEnable);
1769 if (np->wol_enabled) {
1770 if (wol->wolopts & WAKE_MAGIC)
1771 wol_bits |= (MagicPktEnable | WolEnable);
1772 if (wol->wolopts & WAKE_PHY)
1773 wol_bits |= (LinkEventEnable | WolEnable);
1775 iowrite8(wol_bits, ioaddr + WakeEvent);
1777 device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1782 #define sundance_get_wol NULL
1783 #define sundance_set_wol NULL
1784 #endif /* CONFIG_PM */
1786 static const struct ethtool_ops ethtool_ops = {
1787 .begin = check_if_running,
1788 .get_drvinfo = get_drvinfo,
1789 .get_settings = get_settings,
1790 .set_settings = set_settings,
1791 .nway_reset = nway_reset,
1792 .get_link = get_link,
1793 .get_wol = sundance_get_wol,
1794 .set_wol = sundance_set_wol,
1795 .get_msglevel = get_msglevel,
1796 .set_msglevel = set_msglevel,
1797 .get_strings = get_strings,
1798 .get_sset_count = get_sset_count,
1799 .get_ethtool_stats = get_ethtool_stats,
1802 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1804 struct netdev_private *np = netdev_priv(dev);
1807 if (!netif_running(dev))
1810 spin_lock_irq(&np->lock);
1811 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1812 spin_unlock_irq(&np->lock);
1817 static int netdev_close(struct net_device *dev)
1819 struct netdev_private *np = netdev_priv(dev);
1820 void __iomem *ioaddr = np->base;
1821 struct sk_buff *skb;
1824 /* Wait for and kill the tasklets */
1825 tasklet_kill(&np->rx_tasklet);
1826 tasklet_kill(&np->tx_tasklet);
1832 netif_stop_queue(dev);
1834 if (netif_msg_ifdown(np)) {
1835 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1836 "Rx %4.4x Int %2.2x.\n",
1837 dev->name, ioread8(ioaddr + TxStatus),
1838 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1839 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1840 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1843 /* Disable interrupts by clearing the interrupt mask. */
1844 iowrite16(0x0000, ioaddr + IntrEnable);
1846 /* Disable Rx and Tx DMA to safely release resources */
1847 iowrite32(0x500, ioaddr + DMACtrl);
1849 /* Stop the chip's Tx and Rx processes. */
1850 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1852 for (i = 2000; i > 0; i--) {
1853 if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1858 iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1859 ioaddr + ASIC_HI_WORD(ASICCtrl));
1861 for (i = 2000; i > 0; i--) {
1862 if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1868 if (netif_msg_hw(np)) {
1869 printk(KERN_DEBUG " Tx ring at %8.8x:\n",
1870 (int)(np->tx_ring_dma));
1871 for (i = 0; i < TX_RING_SIZE; i++)
1872 printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1873 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1874 np->tx_ring[i].frag[0].length);
1875 printk(KERN_DEBUG " Rx ring %8.8x:\n",
1876 (int)(np->rx_ring_dma));
1877 for (i = 0; i < /*RX_RING_SIZE*/ 4; i++) {
1878 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1879 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1880 np->rx_ring[i].frag[0].length);
1883 #endif /* __i386__ debugging only */
1885 free_irq(np->pci_dev->irq, dev);
1887 del_timer_sync(&np->timer);
1889 /* Free all the skbuffs in the Rx queue. */
1890 for (i = 0; i < RX_RING_SIZE; i++) {
1891 np->rx_ring[i].status = 0;
1892 skb = np->rx_skbuff[i];
1894 dma_unmap_single(&np->pci_dev->dev,
1895 le32_to_cpu(np->rx_ring[i].frag[0].addr),
1896 np->rx_buf_sz, DMA_FROM_DEVICE);
1898 np->rx_skbuff[i] = NULL;
1900 np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1902 for (i = 0; i < TX_RING_SIZE; i++) {
1903 np->tx_ring[i].next_desc = 0;
1904 skb = np->tx_skbuff[i];
1906 dma_unmap_single(&np->pci_dev->dev,
1907 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1908 skb->len, DMA_TO_DEVICE);
1910 np->tx_skbuff[i] = NULL;
1917 static void sundance_remove1(struct pci_dev *pdev)
1919 struct net_device *dev = pci_get_drvdata(pdev);
1922 struct netdev_private *np = netdev_priv(dev);
1923 unregister_netdev(dev);
1924 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1925 np->rx_ring, np->rx_ring_dma);
1926 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1927 np->tx_ring, np->tx_ring_dma);
1928 pci_iounmap(pdev, np->base);
1929 pci_release_regions(pdev);
1931 pci_set_drvdata(pdev, NULL);
1937 static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1939 struct net_device *dev = pci_get_drvdata(pci_dev);
1940 struct netdev_private *np = netdev_priv(dev);
1941 void __iomem *ioaddr = np->base;
1943 if (!netif_running(dev))
1947 netif_device_detach(dev);
1949 pci_save_state(pci_dev);
1950 if (np->wol_enabled) {
1951 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1952 iowrite16(RxEnable, ioaddr + MACCtrl1);
1954 pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
1956 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1961 static int sundance_resume(struct pci_dev *pci_dev)
1963 struct net_device *dev = pci_get_drvdata(pci_dev);
1966 if (!netif_running(dev))
1969 pci_set_power_state(pci_dev, PCI_D0);
1970 pci_restore_state(pci_dev);
1971 pci_enable_wake(pci_dev, PCI_D0, 0);
1973 err = netdev_open(dev);
1975 printk(KERN_ERR "%s: Can't resume interface!\n",
1980 netif_device_attach(dev);
1986 #endif /* CONFIG_PM */
1988 static struct pci_driver sundance_driver = {
1990 .id_table = sundance_pci_tbl,
1991 .probe = sundance_probe1,
1992 .remove = sundance_remove1,
1994 .suspend = sundance_suspend,
1995 .resume = sundance_resume,
1996 #endif /* CONFIG_PM */
1999 static int __init sundance_init(void)
2001 /* when a module, this is printed whether or not devices are found in probe */
2005 return pci_register_driver(&sundance_driver);
2008 static void __exit sundance_exit(void)
2010 pci_unregister_driver(&sundance_driver);
2013 module_init(sundance_init);
2014 module_exit(sundance_exit);