]> rtime.felk.cvut.cz Git - lisovros/linux_canprio.git/blob - drivers/net/tg3.c
tg3: Fix firmware event timeouts
[lisovros/linux_canprio.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.92.1"
68 #define DRV_MODULE_RELDATE      "June 9, 2008"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself,
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133 #define TG3_NUM_TEST            6
134
/* Banner printed at probe time. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmap of NETIF_MSG_* values; -1 selects TG3_DEF_MSG_ENABLE. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
/* PCI IDs of every device this driver claims.  Terminated by an
 * all-zero entry; exported to the module loader via
 * MODULE_DEVICE_TABLE below so hotplug can match it.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
/* Names reported for ETHTOOL_GSTATS.  Order must match the layout of
 * the u64 counters in struct tg3_ethtool_stats (TG3_NUM_STATS is
 * derived from that struct's size).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
298
/* Names reported for the ethtool self-test results; order must match
 * the result slots filled in by the self-test code (TG3_NUM_TEST).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
309
/* Write @val to the chip register at @off via the direct MMIO window. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
314
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 {
317         return (readl(tp->regs + off));
318 }
319
/* Write @val to an APE (Application Processing Engine) register. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
324
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->aperegs + off));
328 }
329
/* Write a chip register through PCI config space (indirect mode).
 * The shared base-address/data register pair is serialized with
 * indirect_lock so concurrent accesses cannot interleave.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
339
/* Write a register and immediately read it back so the write is
 * flushed past any PCI posting buffers before we proceed.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
345
/* Read a chip register through PCI config space (indirect mode),
 * holding indirect_lock around the shared base-address/data pair.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
357
/* Write a mailbox register in indirect mode.  The RX-return and
 * standard-ring producer mailboxes have dedicated aliases in PCI
 * config space and are written directly; all other mailboxes go
 * through the shared indirect window at offset 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
387
/* Read a mailbox register in indirect mode through the shared
 * indirect window (mailboxes live at offset 0x5600).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
399
400 /* usec_wait specifies the wait time in usec when writing to certain registers
401  * where it is unsafe to read back the register without some delay.
402  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
403  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
404  */
405 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
406 {
407         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
408             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
409                 /* Non-posted methods */
410                 tp->write32(tp, off, val);
411         else {
412                 /* Posted method */
413                 tg3_write32(tp, off, val);
414                 if (usec_wait)
415                         udelay(usec_wait);
416                 tp->read32(tp, off);
417         }
418         /* Wait again after the read for the posted method to guarantee that
419          * the wait time is met.
420          */
421         if (usec_wait)
422                 udelay(usec_wait);
423 }
424
/* Write a mailbox register and read it back to flush the posted
 * write.  The read-back is done only when neither the mailbox
 * write-reorder nor the ICH workaround flag is set.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
432
/* Write a TX mailbox.  Chips with the TXD mailbox hardware bug need
 * the value written twice; chips that may reorder mailbox writes need
 * a read-back to force ordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
442
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off + GRCMBOX_BASE));
446 }
447
/* 5906 mailbox write: mailboxes sit at GRCMBOX_BASE on this chip. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
452
/* Mailbox accessors; the _f variant flushes the posted write. */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

/* General register accessors; _f flushes, _wait_f also delays @us usec. */
#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
463
/* Write @val into NIC on-board SRAM at @off via the memory window.
 * On the 5906, writes into the stats-block range are silently
 * skipped.  Depending on TG3_FLAG_SRAM_USE_CONFIG the window is
 * driven through PCI config space or through MMIO, in either case
 * under indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
/* Read a word of NIC on-board SRAM at @off into *@val via the memory
 * window.  On the 5906, reads from the stats-block range return 0.
 * Mirrors tg3_write_mem() in its config-space vs MMIO window choice,
 * under indirect_lock.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
515
516 static void tg3_ape_lock_init(struct tg3 *tp)
517 {
518         int i;
519
520         /* Make sure the driver hasn't any stale locks. */
521         for (i = 0; i < 8; i++)
522                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523                                 APE_LOCK_GRANT_DRIVER);
524 }
525
/* Acquire the APE lock @locknum (only TG3_APE_LOCK_MEM is supported).
 * Returns 0 on success (or when the APE is not enabled), -EINVAL for
 * an unknown lock, and -EBUSY if the grant is not observed within
 * roughly 1 ms of polling.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
		case TG3_APE_LOCK_MEM:
			break;
		default:
			return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
564
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566 {
567         int off;
568
569         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570                 return;
571
572         switch (locknum) {
573                 case TG3_APE_LOCK_MEM:
574                         break;
575                 default:
576                         return;
577         }
578
579         off = 4 * locknum;
580         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581 }
582
/* Disable chip interrupts: mask the PCI interrupt in MISC_HOST_CTRL
 * and write 1 to the interrupt mailbox (with flush).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
/* Conditionally force an interrupt: when tagged status is not in use
 * and the status block shows a pending update, raise it via GRC local
 * control; otherwise kick the host coalescing engine.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Re-enable chip interrupts: clear irq_sync (with a write barrier so
 * it is visible before the unmask), unmask the PCI interrupt, and
 * write the last status tag to the interrupt mailbox.  One-shot MSI
 * chips get the mailbox write twice.  Finally force an interrupt if
 * work may already be pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
614
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 {
617         struct tg3_hw_status *sblk = tp->hw_status;
618         unsigned int work_exists = 0;
619
620         /* check for phy events */
621         if (!(tp->tg3_flags &
622               (TG3_FLAG_USE_LINKCHG_REG |
623                TG3_FLAG_POLL_SERDES))) {
624                 if (sblk->status & SD_STATUS_LINK_CHG)
625                         work_exists = 1;
626         }
627         /* check for RX/TX work to do */
628         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630                 work_exists = 1;
631
632         return work_exists;
633 }
634
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	/* Acknowledge up to last_tag and re-arm the interrupt mailbox;
	 * mmiowb() orders the write before any later MMIO from other CPUs.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
655
/* Quiesce the data path: stop NAPI polling and disable the TX queue.
 * trans_start is refreshed first so the watchdog does not fire while
 * the queue is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
662
/* Restart the data path after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, mark the status block updated, and re-enable
 * interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
674
/* Switch the core clock configuration.  No-op on CPMU-equipped and
 * 5780-class parts.  5705+ parts force the 625 MHz core clock bit
 * when it was set; older parts step through ALTCLK states when the
 * 44 MHz core bit was set.  Each write uses a 40 usec settle time.
 * NOTE(review): exact clock semantics inferred from register names —
 * confirm against the Tigon3 programming documentation.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
/* Maximum number of 10 usec polls of MI_COM before giving up. */
#define PHY_BUSY_LOOPS	5000

/* Read PHY register @reg over the MI (MDIO) interface into *@val.
 * Auto-polling is temporarily disabled while the frame is driven
 * manually, then restored.  Returns 0 on success, -EBUSY if the
 * MI_COM busy bit never clears within PHY_BUSY_LOOPS polls (in which
 * case *@val is left at 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Suspend hardware auto-polling so it cannot contend for MI_COM. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI read frame: PHY address, register, read command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion; re-read after busy clears to latch data. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
757
/* Write @val to PHY register @reg over the MI (MDIO) interface.
 * Writes to MII_TG3_CTRL / MII_TG3_AUX_CTRL are silently ignored on
 * the 5906.  Auto-polling is suspended around the manual frame, as in
 * tg3_readphy().  Returns 0 on success, -EBUSY on MI_COM timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Suspend hardware auto-polling so it cannot contend for MI_COM. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI write frame: PHY address, register, data, command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for the busy bit to clear. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
806
/* Write @val to PHY DSP register @reg: select the address, then push
 * the data through the DSP read/write port.
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
812
/* Enable (@enable != 0) or disable automatic MDI/MDI-X crossover on
 * copper PHYs.  No-op on pre-5705 chips and on any serdes device,
 * which have no MDI pins to swap.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		/* 5906: the MDIX control sits behind the EPHY shadow
		 * register window.  Open the window, read-modify-write
		 * the misc control bit, then restore the original test
		 * register value to close the window.
		 */
		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		/* Other PHYs: go through the aux control misc shadow
		 * register; the WREN bit commits the modified value.
		 */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
850
/* Enable the PHY "ethernet wirespeed" feature (auto speed downshift
 * on marginal cables) unless the chip is flagged as not supporting
 * it.  Done as a read-modify-write of the aux control register;
 * 0x7007 selects the register to read back, bits 15 and 4 enable the
 * feature on write.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
863
864 static int tg3_bmcr_reset(struct tg3 *tp)
865 {
866         u32 phy_control;
867         int limit, err;
868
869         /* OK, reset it, and poll the BMCR_RESET bit until it
870          * clears or we time out.
871          */
872         phy_control = BMCR_RESET;
873         err = tg3_writephy(tp, MII_BMCR, phy_control);
874         if (err != 0)
875                 return -EBUSY;
876
877         limit = 5000;
878         while (limit--) {
879                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
880                 if (err != 0)
881                         return -EBUSY;
882
883                 if ((phy_control & BMCR_RESET) == 0) {
884                         udelay(40);
885                         break;
886                 }
887                 udelay(10);
888         }
889         if (limit <= 0)
890                 return -EBUSY;
891
892         return 0;
893 }
894
/* Program PHY DSP compensation coefficients from the chip's one-time-
 * programmable (OTP) word cached in tp->phy_otp.  Each field of the
 * OTP word is extracted and written to the matching DSP register.
 * No-op when no OTP value was read at probe time.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	/* AGC target + default tap value. */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter settings for channel 0. */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter disable + ADC clock adjust for channel 3. */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
937
938 static int tg3_wait_macro_done(struct tg3 *tp)
939 {
940         int limit = 100;
941
942         while (limit--) {
943                 u32 tmp32;
944
945                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
946                         if ((tmp32 & 0x1000) == 0)
947                                 break;
948                 }
949         }
950         if (limit <= 0)
951                 return -EBUSY;
952
953         return 0;
954 }
955
/* Write a known test pattern into each of the four PHY DSP channels
 * and read it back to verify the DSP TX/RX path.
 *
 * For each channel: load six pattern words, trigger the write macro
 * (0x0202), re-address the channel, arm readback (0x0082 then 0x0802),
 * then read the words back in low/high pairs and compare against the
 * pattern (low masked to 15 bits, high to 4 bits).
 *
 * On a macro timeout *resetp is set to 1 so the caller retries after
 * another PHY reset.  On a data mismatch a DSP error-recovery sequence
 * (register 0x000b, values 0x4001/0x4005) is issued.  Returns 0 if all
 * channels verify, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's block (0x2000 stride). */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Kick the write macro and wait for completion. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back and verify in (low, high) pairs. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the DSP recovery
				 * sequence before reporting failure.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1021
/* Clear the test pattern in all four PHY DSP channels by writing six
 * zero words to each and triggering the write macro.  Returns 0 on
 * success, -EBUSY if a macro operation times out.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
1041
/* PHY reset workaround for 5703/5704/5705-class chips.
 *
 * Repeatedly (up to 10 tries) resets the PHY, forces it into
 * 1000/full master mode with the transmitter blocked, and runs the
 * DSP test-pattern check; a pattern failure requests another reset.
 * Afterwards the DSP channels are cleared and the original PHY
 * settings are restored.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* NOTE(review): if every retry bails out via "continue" above,
	 * phy9_orig (and err, after the first bmcr_reset) can be used
	 * below without having been set on the last pass — confirm
	 * whether the retry loop can actually exhaust that way.
	 */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access and reset the DSP address. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the original master/slave configuration. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1117
1118 static void tg3_link_report(struct tg3 *);
1119
/* Reset the tigon3 PHY and re-apply all chip-specific PHY fixups.
 *
 * (The old comment mentioned a FORCE argument; this function takes
 * none — it always resets.)  Verifies the PHY responds via BMSR,
 * dispatches to the 5703/4/5 workaround path where needed, handles
 * the 5784 CPMU 10Mb-RX-only erratum around the reset, then applies
 * the per-chip DSP/aux-control bug workarounds and re-enables
 * auto-MDIX and wirespeed.  Returns 0 or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Bring the 5906 EPHY out of IDDQ (low-power) mode
		 * before touching it.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice — the second read confirms the PHY is
	 * actually responding on the MDIO bus.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset will drop the link; report it down now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* 5784 (non-AX) erratum: temporarily clear the CPMU
	 * 10Mb-RX-only mode across the reset.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		/* Knock the MAC clock out of the 12.5MHz low-power
		 * setting if the reset left it there.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	/* Per-chip DSP workaround sequences (magic values are from
	 * vendor errata; meanings are not documented here).
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice intentionally. */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1265
/* Switch the NIC's auxiliary (Vaux) power GPIOs on or off.
 *
 * On dual-port chips (5704/5714) the two functions share the aux
 * supply, so the peer device's WOL/ASF state is consulted via
 * tp->pdev_peer.  If either port needs aux power (WOL or ASF
 * enabled) the GPIOs are driven to enable it, with chip-specific
 * sequencing; otherwise aux power is turned off.  No-op on non-NIC
 * (LOM) configurations.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* Aux power needed: drive the GPIOs on. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Let the peer drive the GPIOs if it has
			 * already completed initialization.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			/* Three-step sequence: enable outputs, then
			 * raise GPIO 0, then release GPIO 2.
			 */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Aux power not needed: pulse GPIO 1 to switch it off
		 * (5700/5701 need no action here).
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1376
1377 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1378 {
1379         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1380                 return 1;
1381         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1382                 if (speed != SPEED_10)
1383                         return 1;
1384         } else if (speed == SPEED_10)
1385                 return 1;
1386
1387         return 0;
1388 }
1389
1390 static int tg3_setup_phy(struct tg3 *, int);
1391
1392 #define RESET_KIND_SHUTDOWN     0
1393 #define RESET_KIND_INIT         1
1394 #define RESET_KIND_SUSPEND      2
1395
1396 static void tg3_write_sig_post_reset(struct tg3 *, int);
1397 static int tg3_halt_cpu(struct tg3 *, u32);
1398 static int tg3_nvram_lock(struct tg3 *);
1399 static void tg3_nvram_unlock(struct tg3 *);
1400
/* Put the PHY into its lowest-power state appropriate for this chip.
 *
 * Serdes parts get their SERDES/SG_DIG blocks quiesced instead of a
 * BMCR power-down; the 5906 EPHY is placed in IDDQ mode; chips with
 * known power-down bugs are left powered.  Everything else gets
 * BMCR_PDOWN after LED/aux-control preparation.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* Hold the SG_DIG block in software reset with
			 * HW autoneg selected, and set serdes config
			 * bit 15 (undocumented here — from vendor
			 * power-down sequence).
			 */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the EPHY, then drop it into IDDQ
		 * (deep low-power) mode via GRC_MISC_CFG.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		/* Force the LEDs off and select the low-power aux
		 * control setting before powering down.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		/* Park the MAC clock at 12.5MHz for low power. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1448
1449 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1450 {
1451         u32 misc_host_ctrl;
1452         u16 power_control, power_caps;
1453         int pm = tp->pm_cap;
1454
1455         /* Make sure register accesses (indirect or otherwise)
1456          * will function correctly.
1457          */
1458         pci_write_config_dword(tp->pdev,
1459                                TG3PCI_MISC_HOST_CTRL,
1460                                tp->misc_host_ctrl);
1461
1462         pci_read_config_word(tp->pdev,
1463                              pm + PCI_PM_CTRL,
1464                              &power_control);
1465         power_control |= PCI_PM_CTRL_PME_STATUS;
1466         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1467         switch (state) {
1468         case PCI_D0:
1469                 power_control |= 0;
1470                 pci_write_config_word(tp->pdev,
1471                                       pm + PCI_PM_CTRL,
1472                                       power_control);
1473                 udelay(100);    /* Delay after power state change */
1474
1475                 /* Switch out of Vaux if it is a NIC */
1476                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1477                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1478
1479                 return 0;
1480
1481         case PCI_D1:
1482                 power_control |= 1;
1483                 break;
1484
1485         case PCI_D2:
1486                 power_control |= 2;
1487                 break;
1488
1489         case PCI_D3hot:
1490                 power_control |= 3;
1491                 break;
1492
1493         default:
1494                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1495                        "requested.\n",
1496                        tp->dev->name, state);
1497                 return -EINVAL;
1498         };
1499
1500         power_control |= PCI_PM_CTRL_PME_ENABLE;
1501
1502         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1503         tw32(TG3PCI_MISC_HOST_CTRL,
1504              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1505
1506         if (tp->link_config.phy_is_low_power == 0) {
1507                 tp->link_config.phy_is_low_power = 1;
1508                 tp->link_config.orig_speed = tp->link_config.speed;
1509                 tp->link_config.orig_duplex = tp->link_config.duplex;
1510                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1511         }
1512
1513         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1514                 tp->link_config.speed = SPEED_10;
1515                 tp->link_config.duplex = DUPLEX_HALF;
1516                 tp->link_config.autoneg = AUTONEG_ENABLE;
1517                 tg3_setup_phy(tp, 0);
1518         }
1519
1520         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1521                 u32 val;
1522
1523                 val = tr32(GRC_VCPU_EXT_CTRL);
1524                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1525         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1526                 int i;
1527                 u32 val;
1528
1529                 for (i = 0; i < 200; i++) {
1530                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1531                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1532                                 break;
1533                         msleep(1);
1534                 }
1535         }
1536         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1537                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1538                                                      WOL_DRV_STATE_SHUTDOWN |
1539                                                      WOL_DRV_WOL |
1540                                                      WOL_SET_MAGIC_PKT);
1541
1542         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1543
1544         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1545                 u32 mac_mode;
1546
1547                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1548                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1549                         udelay(40);
1550
1551                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1552                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1553                         else
1554                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1555
1556                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1557                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1558                             ASIC_REV_5700) {
1559                                 u32 speed = (tp->tg3_flags &
1560                                              TG3_FLAG_WOL_SPEED_100MB) ?
1561                                              SPEED_100 : SPEED_10;
1562                                 if (tg3_5700_link_polarity(tp, speed))
1563                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1564                                 else
1565                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1566                         }
1567                 } else {
1568                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1569                 }
1570
1571                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1572                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1573
1574                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1575                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1576                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1577
1578                 tw32_f(MAC_MODE, mac_mode);
1579                 udelay(100);
1580
1581                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1582                 udelay(10);
1583         }
1584
1585         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1586             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1587              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1588                 u32 base_val;
1589
1590                 base_val = tp->pci_clock_ctrl;
1591                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1592                              CLOCK_CTRL_TXCLK_DISABLE);
1593
1594                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1595                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1596         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1597                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1598                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1599                 /* do nothing */
1600         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1601                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1602                 u32 newbits1, newbits2;
1603
1604                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1605                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1606                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1607                                     CLOCK_CTRL_TXCLK_DISABLE |
1608                                     CLOCK_CTRL_ALTCLK);
1609                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1610                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1611                         newbits1 = CLOCK_CTRL_625_CORE;
1612                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1613                 } else {
1614                         newbits1 = CLOCK_CTRL_ALTCLK;
1615                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1616                 }
1617
1618                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1619                             40);
1620
1621                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1622                             40);
1623
1624                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1625                         u32 newbits3;
1626
1627                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1628                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1629                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1630                                             CLOCK_CTRL_TXCLK_DISABLE |
1631                                             CLOCK_CTRL_44MHZ_CORE);
1632                         } else {
1633                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1634                         }
1635
1636                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1637                                     tp->pci_clock_ctrl | newbits3, 40);
1638                 }
1639         }
1640
1641         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1642             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1643             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1644                 tg3_power_down_phy(tp);
1645
1646         tg3_frob_aux_power(tp);
1647
1648         /* Workaround for unstable PLL clock */
1649         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1650             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1651                 u32 val = tr32(0x7d00);
1652
1653                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1654                 tw32(0x7d00, val);
1655                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1656                         int err;
1657
1658                         err = tg3_nvram_lock(tp);
1659                         tg3_halt_cpu(tp, RX_CPU_BASE);
1660                         if (!err)
1661                                 tg3_nvram_unlock(tp);
1662                 }
1663         }
1664
1665         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1666
1667         /* Finally, set the new power state. */
1668         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1669         udelay(100);    /* Delay after power state change */
1670
1671         return 0;
1672 }
1673
1674 /* tp->lock is held. */
1675 static inline void tg3_generate_fw_event(struct tg3 *tp)
1676 {
1677         u32 val;
1678
1679         val = tr32(GRC_RX_CPU_EVENT);
1680         val |= GRC_RX_CPU_DRIVER_EVENT;
1681         tw32_f(GRC_RX_CPU_EVENT, val);
1682
1683         tp->last_event_jiffies = jiffies;
1684 }
1685
/* Maximum time (in usec) to wait for the firmware to acknowledge a
 * driver event posted by tg3_generate_fw_event().
 */
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
/* Wait until the firmware clears GRC_RX_CPU_DRIVER_EVENT (its ack of
 * the last posted event) or the event timeout expires.  Time already
 * elapsed since tp->last_event_jiffies is credited against the
 * timeout, so the wait may be shortened or skipped entirely.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        /* Signed jiffies difference so the comparison survives jiffies
         * wraparound; +1 rounds the deadline up by one tick.
         */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        /* Poll in 8 usec steps; +1 guarantees at least one check. */
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
1714
/* tp->lock is held. */
/* Report the current PHY link state to the ASF/UMP management firmware
 * through the NIC SRAM firmware command mailbox.  Only applies to
 * 5780-class devices with ASF enabled; a no-op otherwise.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
            !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
                return;

        /* Make sure the firmware has consumed the previous event
         * before overwriting the mailbox.
         */
        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        /* Data word 0: BMCR in the upper half, BMSR in the lower half.
         * A failed PHY read leaves the corresponding half zero.
         */
        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        /* Data word 1: local advertisement / link partner ability. */
        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        /* Data word 2: 1000BASE-T control/status, copper PHYs only. */
        val = 0;
        if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        /* Data word 3: PHY address register in the upper half. */
        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        /* Ring the doorbell so the firmware picks up the update. */
        tg3_generate_fw_event(tp);
}
1762
1763 static void tg3_link_report(struct tg3 *tp)
1764 {
1765         if (!netif_carrier_ok(tp->dev)) {
1766                 if (netif_msg_link(tp))
1767                         printk(KERN_INFO PFX "%s: Link is down.\n",
1768                                tp->dev->name);
1769                 tg3_ump_link_report(tp);
1770         } else if (netif_msg_link(tp)) {
1771                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1772                        tp->dev->name,
1773                        (tp->link_config.active_speed == SPEED_1000 ?
1774                         1000 :
1775                         (tp->link_config.active_speed == SPEED_100 ?
1776                          100 : 10)),
1777                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1778                         "full" : "half"));
1779
1780                 printk(KERN_INFO PFX
1781                        "%s: Flow control is %s for TX and %s for RX.\n",
1782                        tp->dev->name,
1783                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1784                        "on" : "off",
1785                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1786                        "on" : "off");
1787                 tg3_ump_link_report(tp);
1788         }
1789 }
1790
1791 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1792 {
1793         u16 miireg;
1794
1795         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1796                 miireg = ADVERTISE_PAUSE_CAP;
1797         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1798                 miireg = ADVERTISE_PAUSE_ASYM;
1799         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1800                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1801         else
1802                 miireg = 0;
1803
1804         return miireg;
1805 }
1806
1807 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1808 {
1809         u16 miireg;
1810
1811         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1812                 miireg = ADVERTISE_1000XPAUSE;
1813         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1814                 miireg = ADVERTISE_1000XPSE_ASYM;
1815         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1816                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1817         else
1818                 miireg = 0;
1819
1820         return miireg;
1821 }
1822
1823 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1824 {
1825         u8 cap = 0;
1826
1827         if (lcladv & ADVERTISE_PAUSE_CAP) {
1828                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1829                         if (rmtadv & LPA_PAUSE_CAP)
1830                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1831                         else if (rmtadv & LPA_PAUSE_ASYM)
1832                                 cap = TG3_FLOW_CTRL_RX;
1833                 } else {
1834                         if (rmtadv & LPA_PAUSE_CAP)
1835                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1836                 }
1837         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1838                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1839                         cap = TG3_FLOW_CTRL_TX;
1840         }
1841
1842         return cap;
1843 }
1844
1845 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1846 {
1847         u8 cap = 0;
1848
1849         if (lcladv & ADVERTISE_1000XPAUSE) {
1850                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1851                         if (rmtadv & LPA_1000XPAUSE)
1852                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1853                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1854                                 cap = TG3_FLOW_CTRL_RX;
1855                 } else {
1856                         if (rmtadv & LPA_1000XPAUSE)
1857                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1858                 }
1859         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1860                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1861                         cap = TG3_FLOW_CTRL_TX;
1862         }
1863
1864         return cap;
1865 }
1866
/* Resolve the active flow-control (pause) setting and program the MAC.
 *
 * When autoneg is enabled and pause autonegotiation is in effect, the
 * active TX/RX pause state is derived from the local and remote
 * advertisement words (serdes or copper resolution as appropriate);
 * otherwise the user-forced tp->link_config.flowctrl is used as-is.
 * The MAC RX/TX mode registers are rewritten only if their values
 * actually change.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
        u8 new_tg3_flags = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tp->link_config.autoneg == AUTONEG_ENABLE &&
            (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
                if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
                        new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
                                                                   remote_adv);
                else
                        new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
                                                                   remote_adv);
        } else {
                /* Forced configuration: honor the requested setting. */
                new_tg3_flags = tp->link_config.flowctrl;
        }

        tp->link_config.active_flowctrl = new_tg3_flags;

        if (new_tg3_flags & TG3_FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        /* Only touch the hardware when the mode actually changed. */
        if (old_rx_mode != tp->rx_mode) {
                tw32_f(MAC_RX_MODE, tp->rx_mode);
        }

        if (new_tg3_flags & TG3_FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode) {
                tw32_f(MAC_TX_MODE, tp->tx_mode);
        }
}
1905
1906 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1907 {
1908         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1909         case MII_TG3_AUX_STAT_10HALF:
1910                 *speed = SPEED_10;
1911                 *duplex = DUPLEX_HALF;
1912                 break;
1913
1914         case MII_TG3_AUX_STAT_10FULL:
1915                 *speed = SPEED_10;
1916                 *duplex = DUPLEX_FULL;
1917                 break;
1918
1919         case MII_TG3_AUX_STAT_100HALF:
1920                 *speed = SPEED_100;
1921                 *duplex = DUPLEX_HALF;
1922                 break;
1923
1924         case MII_TG3_AUX_STAT_100FULL:
1925                 *speed = SPEED_100;
1926                 *duplex = DUPLEX_FULL;
1927                 break;
1928
1929         case MII_TG3_AUX_STAT_1000HALF:
1930                 *speed = SPEED_1000;
1931                 *duplex = DUPLEX_HALF;
1932                 break;
1933
1934         case MII_TG3_AUX_STAT_1000FULL:
1935                 *speed = SPEED_1000;
1936                 *duplex = DUPLEX_FULL;
1937                 break;
1938
1939         default:
1940                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1941                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1942                                  SPEED_10;
1943                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1944                                   DUPLEX_HALF;
1945                         break;
1946                 }
1947                 *speed = SPEED_INVALID;
1948                 *duplex = DUPLEX_INVALID;
1949                 break;
1950         };
1951 }
1952
1953 static void tg3_phy_copper_begin(struct tg3 *tp)
1954 {
1955         u32 new_adv;
1956         int i;
1957
1958         if (tp->link_config.phy_is_low_power) {
1959                 /* Entering low power mode.  Disable gigabit and
1960                  * 100baseT advertisements.
1961                  */
1962                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1963
1964                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1965                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1966                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1967                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1968
1969                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1970         } else if (tp->link_config.speed == SPEED_INVALID) {
1971                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1972                         tp->link_config.advertising &=
1973                                 ~(ADVERTISED_1000baseT_Half |
1974                                   ADVERTISED_1000baseT_Full);
1975
1976                 new_adv = ADVERTISE_CSMA;
1977                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1978                         new_adv |= ADVERTISE_10HALF;
1979                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1980                         new_adv |= ADVERTISE_10FULL;
1981                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1982                         new_adv |= ADVERTISE_100HALF;
1983                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1984                         new_adv |= ADVERTISE_100FULL;
1985
1986                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1987
1988                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1989
1990                 if (tp->link_config.advertising &
1991                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1992                         new_adv = 0;
1993                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1994                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1995                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1996                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1997                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1998                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1999                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2000                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2001                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2002                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2003                 } else {
2004                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2005                 }
2006         } else {
2007                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2008                 new_adv |= ADVERTISE_CSMA;
2009
2010                 /* Asking for a specific link mode. */
2011                 if (tp->link_config.speed == SPEED_1000) {
2012                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2013
2014                         if (tp->link_config.duplex == DUPLEX_FULL)
2015                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2016                         else
2017                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2018                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2019                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2020                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2021                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2022                 } else {
2023                         if (tp->link_config.speed == SPEED_100) {
2024                                 if (tp->link_config.duplex == DUPLEX_FULL)
2025                                         new_adv |= ADVERTISE_100FULL;
2026                                 else
2027                                         new_adv |= ADVERTISE_100HALF;
2028                         } else {
2029                                 if (tp->link_config.duplex == DUPLEX_FULL)
2030                                         new_adv |= ADVERTISE_10FULL;
2031                                 else
2032                                         new_adv |= ADVERTISE_10HALF;
2033                         }
2034                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2035
2036                         new_adv = 0;
2037                 }
2038
2039                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2040         }
2041
2042         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2043             tp->link_config.speed != SPEED_INVALID) {
2044                 u32 bmcr, orig_bmcr;
2045
2046                 tp->link_config.active_speed = tp->link_config.speed;
2047                 tp->link_config.active_duplex = tp->link_config.duplex;
2048
2049                 bmcr = 0;
2050                 switch (tp->link_config.speed) {
2051                 default:
2052                 case SPEED_10:
2053                         break;
2054
2055                 case SPEED_100:
2056                         bmcr |= BMCR_SPEED100;
2057                         break;
2058
2059                 case SPEED_1000:
2060                         bmcr |= TG3_BMCR_SPEED1000;
2061                         break;
2062                 };
2063
2064                 if (tp->link_config.duplex == DUPLEX_FULL)
2065                         bmcr |= BMCR_FULLDPLX;
2066
2067                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2068                     (bmcr != orig_bmcr)) {
2069                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2070                         for (i = 0; i < 1500; i++) {
2071                                 u32 tmp;
2072
2073                                 udelay(10);
2074                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2075                                     tg3_readphy(tp, MII_BMSR, &tmp))
2076                                         continue;
2077                                 if (!(tmp & BMSR_LSTATUS)) {
2078                                         udelay(40);
2079                                         break;
2080                                 }
2081                         }
2082                         tg3_writephy(tp, MII_BMCR, bmcr);
2083                         udelay(40);
2084                 }
2085         } else {
2086                 tg3_writephy(tp, MII_BMCR,
2087                              BMCR_ANENABLE | BMCR_ANRESTART);
2088         }
2089 }
2090
2091 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2092 {
2093         int err;
2094
2095         /* Turn off tap power management. */
2096         /* Set Extended packet length bit */
2097         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2098
2099         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2100         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2101
2102         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2103         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2104
2105         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2106         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2107
2108         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2109         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2110
2111         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2112         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2113
2114         udelay(40);
2115
2116         return err;
2117 }
2118
2119 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2120 {
2121         u32 adv_reg, all_mask = 0;
2122
2123         if (mask & ADVERTISED_10baseT_Half)
2124                 all_mask |= ADVERTISE_10HALF;
2125         if (mask & ADVERTISED_10baseT_Full)
2126                 all_mask |= ADVERTISE_10FULL;
2127         if (mask & ADVERTISED_100baseT_Half)
2128                 all_mask |= ADVERTISE_100HALF;
2129         if (mask & ADVERTISED_100baseT_Full)
2130                 all_mask |= ADVERTISE_100FULL;
2131
2132         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2133                 return 0;
2134
2135         if ((adv_reg & all_mask) != all_mask)
2136                 return 0;
2137         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2138                 u32 tg3_ctrl;
2139
2140                 all_mask = 0;
2141                 if (mask & ADVERTISED_1000baseT_Half)
2142                         all_mask |= ADVERTISE_1000HALF;
2143                 if (mask & ADVERTISED_1000baseT_Full)
2144                         all_mask |= ADVERTISE_1000FULL;
2145
2146                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2147                         return 0;
2148
2149                 if ((tg3_ctrl & all_mask) != all_mask)
2150                         return 0;
2151         }
2152         return 1;
2153 }
2154
/* Check whether the pause bits currently advertised in MII_ADVERTISE
 * match what tp->link_config.flowctrl requires.
 *
 * Fills in *lcladv from MII_ADVERTISE and, on a full-duplex link with
 * pause autoneg enabled, *rmtadv from MII_LPA.  Returns 0 only when
 * the link is full duplex and the advertised pause bits are wrong
 * (caller must renegotiate); returns 1 otherwise, including on a PHY
 * read failure.  On a half-duplex link a wrong advertisement is
 * silently rewritten so the next negotiation is correct.
 */
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
        u32 curadv, reqadv;

        if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
                return 1;

        curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
        reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

        if (tp->link_config.active_duplex == DUPLEX_FULL) {
                if (curadv != reqadv)
                        return 0;

                if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
                        tg3_readphy(tp, MII_LPA, rmtadv);
        } else {
                /* Reprogram the advertisement register, even if it
                 * does not affect the current link.  If the link
                 * gets renegotiated in the future, we can save an
                 * additional renegotiation cycle by advertising
                 * it correctly in the first place.
                 */
                if (curadv != reqadv) {
                        *lcladv &= ~(ADVERTISE_PAUSE_CAP |
                                     ADVERTISE_PAUSE_ASYM);
                        tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
                }
        }

        return 1;
}
2187
2188 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2189 {
2190         int current_link_up;
2191         u32 bmsr, dummy;
2192         u32 lcl_adv, rmt_adv;
2193         u16 current_speed;
2194         u8 current_duplex;
2195         int i, err;
2196
2197         tw32(MAC_EVENT, 0);
2198
2199         tw32_f(MAC_STATUS,
2200              (MAC_STATUS_SYNC_CHANGED |
2201               MAC_STATUS_CFG_CHANGED |
2202               MAC_STATUS_MI_COMPLETION |
2203               MAC_STATUS_LNKSTATE_CHANGED));
2204         udelay(40);
2205
2206         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2207                 tw32_f(MAC_MI_MODE,
2208                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2209                 udelay(80);
2210         }
2211
2212         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2213
2214         /* Some third-party PHYs need to be reset on link going
2215          * down.
2216          */
2217         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2218              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2219              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2220             netif_carrier_ok(tp->dev)) {
2221                 tg3_readphy(tp, MII_BMSR, &bmsr);
2222                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2223                     !(bmsr & BMSR_LSTATUS))
2224                         force_reset = 1;
2225         }
2226         if (force_reset)
2227                 tg3_phy_reset(tp);
2228
2229         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2230                 tg3_readphy(tp, MII_BMSR, &bmsr);
2231                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2232                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2233                         bmsr = 0;
2234
2235                 if (!(bmsr & BMSR_LSTATUS)) {
2236                         err = tg3_init_5401phy_dsp(tp);
2237                         if (err)
2238                                 return err;
2239
2240                         tg3_readphy(tp, MII_BMSR, &bmsr);
2241                         for (i = 0; i < 1000; i++) {
2242                                 udelay(10);
2243                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2244                                     (bmsr & BMSR_LSTATUS)) {
2245                                         udelay(40);
2246                                         break;
2247                                 }
2248                         }
2249
2250                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2251                             !(bmsr & BMSR_LSTATUS) &&
2252                             tp->link_config.active_speed == SPEED_1000) {
2253                                 err = tg3_phy_reset(tp);
2254                                 if (!err)
2255                                         err = tg3_init_5401phy_dsp(tp);
2256                                 if (err)
2257                                         return err;
2258                         }
2259                 }
2260         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2261                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2262                 /* 5701 {A0,B0} CRC bug workaround */
2263                 tg3_writephy(tp, 0x15, 0x0a75);
2264                 tg3_writephy(tp, 0x1c, 0x8c68);
2265                 tg3_writephy(tp, 0x1c, 0x8d68);
2266                 tg3_writephy(tp, 0x1c, 0x8c68);
2267         }
2268
2269         /* Clear pending interrupts... */
2270         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2271         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2272
2273         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2274                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2275         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2276                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2277
2278         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2279             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2280                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2281                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2282                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2283                 else
2284                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2285         }
2286
2287         current_link_up = 0;
2288         current_speed = SPEED_INVALID;
2289         current_duplex = DUPLEX_INVALID;
2290
2291         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2292                 u32 val;
2293
2294                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2295                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2296                 if (!(val & (1 << 10))) {
2297                         val |= (1 << 10);
2298                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2299                         goto relink;
2300                 }
2301         }
2302
2303         bmsr = 0;
2304         for (i = 0; i < 100; i++) {
2305                 tg3_readphy(tp, MII_BMSR, &bmsr);
2306                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2307                     (bmsr & BMSR_LSTATUS))
2308                         break;
2309                 udelay(40);
2310         }
2311
2312         if (bmsr & BMSR_LSTATUS) {
2313                 u32 aux_stat, bmcr;
2314
2315                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2316                 for (i = 0; i < 2000; i++) {
2317                         udelay(10);
2318                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2319                             aux_stat)
2320                                 break;
2321                 }
2322
2323                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2324                                              &current_speed,
2325                                              &current_duplex);
2326
2327                 bmcr = 0;
2328                 for (i = 0; i < 200; i++) {
2329                         tg3_readphy(tp, MII_BMCR, &bmcr);
2330                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2331                                 continue;
2332                         if (bmcr && bmcr != 0x7fff)
2333                                 break;
2334                         udelay(10);
2335                 }
2336
2337                 lcl_adv = 0;
2338                 rmt_adv = 0;
2339
2340                 tp->link_config.active_speed = current_speed;
2341                 tp->link_config.active_duplex = current_duplex;
2342
2343                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2344                         if ((bmcr & BMCR_ANENABLE) &&
2345                             tg3_copper_is_advertising_all(tp,
2346                                                 tp->link_config.advertising)) {
2347                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2348                                                                   &rmt_adv))
2349                                         current_link_up = 1;
2350                         }
2351                 } else {
2352                         if (!(bmcr & BMCR_ANENABLE) &&
2353                             tp->link_config.speed == current_speed &&
2354                             tp->link_config.duplex == current_duplex &&
2355                             tp->link_config.flowctrl ==
2356                             tp->link_config.active_flowctrl) {
2357                                 current_link_up = 1;
2358                         }
2359                 }
2360
2361                 if (current_link_up == 1 &&
2362                     tp->link_config.active_duplex == DUPLEX_FULL)
2363                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2364         }
2365
2366 relink:
2367         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2368                 u32 tmp;
2369
2370                 tg3_phy_copper_begin(tp);
2371
2372                 tg3_readphy(tp, MII_BMSR, &tmp);
2373                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2374                     (tmp & BMSR_LSTATUS))
2375                         current_link_up = 1;
2376         }
2377
2378         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2379         if (current_link_up == 1) {
2380                 if (tp->link_config.active_speed == SPEED_100 ||
2381                     tp->link_config.active_speed == SPEED_10)
2382                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2383                 else
2384                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2385         } else
2386                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2387
2388         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2389         if (tp->link_config.active_duplex == DUPLEX_HALF)
2390                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2391
2392         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2393                 if (current_link_up == 1 &&
2394                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2395                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2396                 else
2397                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2398         }
2399
2400         /* ??? Without this setting Netgear GA302T PHY does not
2401          * ??? send/receive packets...
2402          */
2403         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2404             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2405                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2406                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2407                 udelay(80);
2408         }
2409
2410         tw32_f(MAC_MODE, tp->mac_mode);
2411         udelay(40);
2412
2413         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2414                 /* Polled via timer. */
2415                 tw32_f(MAC_EVENT, 0);
2416         } else {
2417                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2418         }
2419         udelay(40);
2420
2421         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2422             current_link_up == 1 &&
2423             tp->link_config.active_speed == SPEED_1000 &&
2424             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2425              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2426                 udelay(120);
2427                 tw32_f(MAC_STATUS,
2428                      (MAC_STATUS_SYNC_CHANGED |
2429                       MAC_STATUS_CFG_CHANGED));
2430                 udelay(40);
2431                 tg3_write_mem(tp,
2432                               NIC_SRAM_FIRMWARE_MBOX,
2433                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2434         }
2435
2436         if (current_link_up != netif_carrier_ok(tp->dev)) {
2437                 if (current_link_up)
2438                         netif_carrier_on(tp->dev);
2439                 else
2440                         netif_carrier_off(tp->dev);
2441                 tg3_link_report(tp);
2442         }
2443
2444         return 0;
2445 }
2446
/* Software state for the 1000BASE-X (fiber) autonegotiation state
 * machine implemented by tg3_fiber_aneg_smachine().  One instance is
 * allocated on the stack by fiber_autoneg() per negotiation attempt.
 */
struct tg3_fiber_aneginfo {
        int state;      /* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;      /* MR_* control and result bits, below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Timestamps in state-machine ticks (cur_time is incremented
         * once per tg3_fiber_aneg_smachine() invocation).
         */
        unsigned long link_time, cur_time;

        /* Last received config word and how many consecutive samples
         * matched it; two matching non-zero samples set ability_match.
         */
        u32 ability_match_cfg;
        int ability_match_count;

        /* Boolean match results derived from the MAC receive state. */
        char ability_match, idle_match, ack_match;

        /* Raw transmitted/received autoneg config words (ANEG_CFG_*). */
        u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0       /* keep stepping the state machine */
#define ANEG_DONE       1       /* negotiation finished */
#define ANEG_TIMER_ENAB 2       /* waiting out a settle interval */
#define ANEG_FAILED     -1      /* negotiation failed */

/* Settle interval in state-machine ticks (one tick per invocation). */
#define ANEG_STATE_SETTLE_TIME  10000
2510
2511 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2512                                    struct tg3_fiber_aneginfo *ap)
2513 {
2514         u16 flowctrl;
2515         unsigned long delta;
2516         u32 rx_cfg_reg;
2517         int ret;
2518
2519         if (ap->state == ANEG_STATE_UNKNOWN) {
2520                 ap->rxconfig = 0;
2521                 ap->link_time = 0;
2522                 ap->cur_time = 0;
2523                 ap->ability_match_cfg = 0;
2524                 ap->ability_match_count = 0;
2525                 ap->ability_match = 0;
2526                 ap->idle_match = 0;
2527                 ap->ack_match = 0;
2528         }
2529         ap->cur_time++;
2530
2531         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2532                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2533
2534                 if (rx_cfg_reg != ap->ability_match_cfg) {
2535                         ap->ability_match_cfg = rx_cfg_reg;
2536                         ap->ability_match = 0;
2537                         ap->ability_match_count = 0;
2538                 } else {
2539                         if (++ap->ability_match_count > 1) {
2540                                 ap->ability_match = 1;
2541                                 ap->ability_match_cfg = rx_cfg_reg;
2542                         }
2543                 }
2544                 if (rx_cfg_reg & ANEG_CFG_ACK)
2545                         ap->ack_match = 1;
2546                 else
2547                         ap->ack_match = 0;
2548
2549                 ap->idle_match = 0;
2550         } else {
2551                 ap->idle_match = 1;
2552                 ap->ability_match_cfg = 0;
2553                 ap->ability_match_count = 0;
2554                 ap->ability_match = 0;
2555                 ap->ack_match = 0;
2556
2557                 rx_cfg_reg = 0;
2558         }
2559
2560         ap->rxconfig = rx_cfg_reg;
2561         ret = ANEG_OK;
2562
2563         switch(ap->state) {
2564         case ANEG_STATE_UNKNOWN:
2565                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2566                         ap->state = ANEG_STATE_AN_ENABLE;
2567
2568                 /* fallthru */
2569         case ANEG_STATE_AN_ENABLE:
2570                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2571                 if (ap->flags & MR_AN_ENABLE) {
2572                         ap->link_time = 0;
2573                         ap->cur_time = 0;
2574                         ap->ability_match_cfg = 0;
2575                         ap->ability_match_count = 0;
2576                         ap->ability_match = 0;
2577                         ap->idle_match = 0;
2578                         ap->ack_match = 0;
2579
2580                         ap->state = ANEG_STATE_RESTART_INIT;
2581                 } else {
2582                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2583                 }
2584                 break;
2585
2586         case ANEG_STATE_RESTART_INIT:
2587                 ap->link_time = ap->cur_time;
2588                 ap->flags &= ~(MR_NP_LOADED);
2589                 ap->txconfig = 0;
2590                 tw32(MAC_TX_AUTO_NEG, 0);
2591                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2592                 tw32_f(MAC_MODE, tp->mac_mode);
2593                 udelay(40);
2594
2595                 ret = ANEG_TIMER_ENAB;
2596                 ap->state = ANEG_STATE_RESTART;
2597
2598                 /* fallthru */
2599         case ANEG_STATE_RESTART:
2600                 delta = ap->cur_time - ap->link_time;
2601                 if (delta > ANEG_STATE_SETTLE_TIME) {
2602                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2603                 } else {
2604                         ret = ANEG_TIMER_ENAB;
2605                 }
2606                 break;
2607
2608         case ANEG_STATE_DISABLE_LINK_OK:
2609                 ret = ANEG_DONE;
2610                 break;
2611
2612         case ANEG_STATE_ABILITY_DETECT_INIT:
2613                 ap->flags &= ~(MR_TOGGLE_TX);
2614                 ap->txconfig = ANEG_CFG_FD;
2615                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2616                 if (flowctrl & ADVERTISE_1000XPAUSE)
2617                         ap->txconfig |= ANEG_CFG_PS1;
2618                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2619                         ap->txconfig |= ANEG_CFG_PS2;
2620                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2621                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2622                 tw32_f(MAC_MODE, tp->mac_mode);
2623                 udelay(40);
2624
2625                 ap->state = ANEG_STATE_ABILITY_DETECT;
2626                 break;
2627
2628         case ANEG_STATE_ABILITY_DETECT:
2629                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2630                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2631                 }
2632                 break;
2633
2634         case ANEG_STATE_ACK_DETECT_INIT:
2635                 ap->txconfig |= ANEG_CFG_ACK;
2636                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2637                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2638                 tw32_f(MAC_MODE, tp->mac_mode);
2639                 udelay(40);
2640
2641                 ap->state = ANEG_STATE_ACK_DETECT;
2642
2643                 /* fallthru */
2644         case ANEG_STATE_ACK_DETECT:
2645                 if (ap->ack_match != 0) {
2646                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2647                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2648                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2649                         } else {
2650                                 ap->state = ANEG_STATE_AN_ENABLE;
2651                         }
2652                 } else if (ap->ability_match != 0 &&
2653                            ap->rxconfig == 0) {
2654                         ap->state = ANEG_STATE_AN_ENABLE;
2655                 }
2656                 break;
2657
2658         case ANEG_STATE_COMPLETE_ACK_INIT:
2659                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2660                         ret = ANEG_FAILED;
2661                         break;
2662                 }
2663                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2664                                MR_LP_ADV_HALF_DUPLEX |
2665                                MR_LP_ADV_SYM_PAUSE |
2666                                MR_LP_ADV_ASYM_PAUSE |
2667                                MR_LP_ADV_REMOTE_FAULT1 |
2668                                MR_LP_ADV_REMOTE_FAULT2 |
2669                                MR_LP_ADV_NEXT_PAGE |
2670                                MR_TOGGLE_RX |
2671                                MR_NP_RX);
2672                 if (ap->rxconfig & ANEG_CFG_FD)
2673                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2674                 if (ap->rxconfig & ANEG_CFG_HD)
2675                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2676                 if (ap->rxconfig & ANEG_CFG_PS1)
2677                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2678                 if (ap->rxconfig & ANEG_CFG_PS2)
2679                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2680                 if (ap->rxconfig & ANEG_CFG_RF1)
2681                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2682                 if (ap->rxconfig & ANEG_CFG_RF2)
2683                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2684                 if (ap->rxconfig & ANEG_CFG_NP)
2685                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2686
2687                 ap->link_time = ap->cur_time;
2688
2689                 ap->flags ^= (MR_TOGGLE_TX);
2690                 if (ap->rxconfig & 0x0008)
2691                         ap->flags |= MR_TOGGLE_RX;
2692                 if (ap->rxconfig & ANEG_CFG_NP)
2693                         ap->flags |= MR_NP_RX;
2694                 ap->flags |= MR_PAGE_RX;
2695
2696                 ap->state = ANEG_STATE_COMPLETE_ACK;
2697                 ret = ANEG_TIMER_ENAB;
2698                 break;
2699
2700         case ANEG_STATE_COMPLETE_ACK:
2701                 if (ap->ability_match != 0 &&
2702                     ap->rxconfig == 0) {
2703                         ap->state = ANEG_STATE_AN_ENABLE;
2704                         break;
2705                 }
2706                 delta = ap->cur_time - ap->link_time;
2707                 if (delta > ANEG_STATE_SETTLE_TIME) {
2708                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2709                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2710                         } else {
2711                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2712                                     !(ap->flags & MR_NP_RX)) {
2713                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2714                                 } else {
2715                                         ret = ANEG_FAILED;
2716                                 }
2717                         }
2718                 }
2719                 break;
2720
2721         case ANEG_STATE_IDLE_DETECT_INIT:
2722                 ap->link_time = ap->cur_time;
2723                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2724                 tw32_f(MAC_MODE, tp->mac_mode);
2725                 udelay(40);
2726
2727                 ap->state = ANEG_STATE_IDLE_DETECT;
2728                 ret = ANEG_TIMER_ENAB;
2729                 break;
2730
2731         case ANEG_STATE_IDLE_DETECT:
2732                 if (ap->ability_match != 0 &&
2733                     ap->rxconfig == 0) {
2734                         ap->state = ANEG_STATE_AN_ENABLE;
2735                         break;
2736                 }
2737                 delta = ap->cur_time - ap->link_time;
2738                 if (delta > ANEG_STATE_SETTLE_TIME) {
2739                         /* XXX another gem from the Broadcom driver :( */
2740                         ap->state = ANEG_STATE_LINK_OK;
2741                 }
2742                 break;
2743
2744         case ANEG_STATE_LINK_OK:
2745                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2746                 ret = ANEG_DONE;
2747                 break;
2748
2749         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2750                 /* ??? unimplemented */
2751                 break;
2752
2753         case ANEG_STATE_NEXT_PAGE_WAIT:
2754                 /* ??? unimplemented */
2755                 break;
2756
2757         default:
2758                 ret = ANEG_FAILED;
2759                 break;
2760         };
2761
2762         return ret;
2763 }
2764
/* Run the software 1000BASE-X autonegotiation state machine to
 * completion (or ~195 ms timeout) and report the results.
 *
 * @tp:      device state
 * @txflags: on return, the config word we advertised (ANEG_CFG_* bits)
 * @rxflags: on return, the MR_* flags accumulated by the state machine
 *
 * Returns 1 when negotiation finished (ANEG_DONE) and any of
 * MR_AN_COMPLETE, MR_LINK_OK or MR_LP_ADV_FULL_DUPLEX is set;
 * 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
        int res = 0;
        struct tg3_fiber_aneginfo aninfo;
        int status = ANEG_FAILED;
        unsigned int tick;
        u32 tmp;

        /* Clear our transmitted config word, then force GMII port mode
         * and enable SEND_CONFIGS so the MAC emits config code words.
         */
        tw32_f(MAC_TX_AUTO_NEG, 0);

        tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
        tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
        udelay(40);

        tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
        udelay(40);

        memset(&aninfo, 0, sizeof(aninfo));
        aninfo.flags |= MR_AN_ENABLE;
        aninfo.state = ANEG_STATE_UNKNOWN;
        aninfo.cur_time = 0;
        tick = 0;
        /* Step the machine once per microsecond, bounded at ~195 ms. */
        while (++tick < 195000) {
                status = tg3_fiber_aneg_smachine(tp, &aninfo);
                if (status == ANEG_DONE || status == ANEG_FAILED)
                        break;

                udelay(1);
        }

        /* Stop transmitting config code words. */
        tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        *txflags = aninfo.txconfig;
        *rxflags = aninfo.flags;

        if (status == ANEG_DONE &&
            (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
                             MR_LP_ADV_FULL_DUPLEX)))
                res = 1;

        return res;
}
2809
/* Initialize the BCM8002 SerDes PHY with its vendor-specific register
 * sequence.  The register numbers/values below come from Broadcom;
 * the sequence and delays are order-sensitive.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
2859
/* Establish fiber link using the hardware (SG_DIG) autonegotiation
 * engine.
 *
 * @tp:         device state
 * @mac_status: MAC_STATUS value sampled by the caller
 *
 * Returns 1 if the link is up, 0 otherwise.  Applies a MAC_SERDES_CFG
 * workaround on all chips except 5704 A0/A1.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Forced link: disable hardware autoneg if enabled. */
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                /* Port-dependent serdes config value. */
                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* Hold off restarting autoneg while a parallel-detect
                 * link is still counting down and PCS is synced with no
                 * config words being received.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = 1;
                        goto out;
                }
restart_autoneg:
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                /* Pulse SOFT_RESET while writing the wanted control. */
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        u32 local_adv = 0, remote_adv = 0;

                        /* Negotiation done: derive pause settings. */
                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                /* Autoneg timed out: fall back to
                                 * parallel detection.
                                 */
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                        tp->tg3_flags2 |=
                                                TG3_FLG2_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* No sync or signal: restart the autoneg countdown. */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
3001
/* Establish fiber link using the software autoneg state machine (or a
 * forced 1000FD link when autoneg is disabled).
 *
 * @tp:         device state
 * @mac_status: MAC_STATUS value sampled by the caller
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        int current_link_up = 0;

        /* Nothing to do without PCS sync. */
        if (!(mac_status & MAC_STATUS_PCS_SYNCED))
                goto out;

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 txflags, rxflags;
                int i;

                if (fiber_autoneg(tp, &txflags, &rxflags)) {
                        u32 local_adv = 0, remote_adv = 0;

                        /* Translate negotiated pause bits into the MII
                         * 1000BASE-X advertisement encoding for
                         * tg3_setup_flow_control().
                         */
                        if (txflags & ANEG_CFG_PS1)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (txflags & ANEG_CFG_PS2)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (rxflags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        current_link_up = 1;
                }
                /* Ack sync/config change events until they stop. */
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                /* Autoneg found no partner: accept the link anyway if
                 * PCS is synced and no config words are arriving.
                 */
                mac_status = tr32(MAC_STATUS);
                if (current_link_up == 0 &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = 1;
        } else {
                tg3_setup_flow_control(tp, 0, 0);

                /* Forcing 1000FD link up. */
                current_link_up = 1;

                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);

                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

out:
        return current_link_up;
}
3063
/* Link management for TBI (fiber) ports.
 *
 * Always returns 0; the link outcome is published through the carrier
 * state and tg3_link_report().  @force_reset is accepted for signature
 * symmetry with the other tg3_setup_*_phy() helpers but is not used
 * on this path.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current settings so we only report a link change at
	 * the end if something actually differs.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: no HW autoneg, carrier already up, init complete.
	 * If the MAC still shows sync + signal detect and no config
	 * change, just ack the latched status bits and keep the link.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the link-change bit in the status block ourselves so
	 * the interrupt path does not re-run PHY setup for this event.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the MAC status change bits until they stay clear, giving
	 * up after 100 attempts.
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Pulse SEND_CONFIGS to nudge the link partner
			 * into restarting autoneg.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* A fiber link is always 1000/FULL when up; set the LEDs to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report either a carrier transition or a parameter change. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
3171
/* Link management for SERDES ports driven through an MII register
 * interface (e.g. 5714-class parts).  Mirrors the copper flow: program
 * the 1000BASE-X advertisement, (re)start autoneg or force the link,
 * then derive speed/duplex and flow control from BMSR/BMCR and the
 * advertisement registers.
 *
 * Returns 0 on success, or the OR-accumulated error from the
 * tg3_readphy() calls.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any latched MAC status bits before probing the link. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice so bmsr reflects
	 * the current state rather than a stale transition.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: take link state from the MAC's TX status, not BMSR. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from link_config. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or AN was off): write it
			 * out, restart autoneg, arm the serdes timeout and
			 * return early; the poll path finishes the job.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: turn off autoneg and set the duplex bit. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Clear the advertisement and restart AN
				 * so the partner drops the link before we
				 * switch to forced mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low BMSR again: double read. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of our
			 * advertisement and the link partner's ability.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3341
/* Poll-time helper for MII serdes ports.
 *
 * Once the autoneg grace counter (tp->serdes_counter) expires with no
 * link, fall back to parallel detection: if we see signal detect but
 * no incoming config code words, force 1000/FULL with autoneg off.
 * Conversely, if the link is up by parallel detection and the partner
 * starts sending config code words again, re-enable autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Double read: the status register is latched. */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3399
/* Top-level PHY/link setup.
 *
 * Dispatches to the fiber, MII-serdes or copper handler depending on
 * the device flags, then reprograms the MAC state that depends on the
 * resulting link: the GRC timer prescaler on 5784 A0/A1, the TX slot
 * time for 1000HD, statistics coalescing on pre-5705 chips, and the
 * PCIe L1 power-management threshold for the ASPM workaround.
 *
 * Returns the error code from the selected handler.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
		u32 val, scale;

		/* Match the GRC timer prescaler to the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* 1000 Mb/s half duplex needs the large (0xff) slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Pre-5705 chips: coalesce statistics only while the link is up. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		/* Link down: use the configured L1 threshold; link up:
		 * saturate the threshold field.
		 */
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3462
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already in effect we should never
	 * see another bogus completion -- that would be a different bug.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery under tp->lock; the actual chip
	 * reset is performed later from the workqueue.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3483
3484 static inline u32 tg3_tx_avail(struct tg3 *tp)
3485 {
3486         smp_mb();
3487         return (tp->tx_pending -
3488                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3489 }
3490
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Reclaim every descriptor the hardware has consumed. */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed head slot with no skb means the chip
		 * reported a bogus completion; see tg3_tx_recover().
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap the fragment slots that follow the head slot.
		 * A fragment slot holding an skb, or the walk catching
		 * up with hw_idx mid-packet, indicates ring corruption.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		/* Re-check under the tx lock to avoid racing a
		 * concurrent queue stop in the transmit path.
		 */
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3558
3559 /* Returns size of skb allocated or < 0 on error.
3560  *
3561  * We only need to fill in the address because the other members
3562  * of the RX descriptor are invariant, see tg3_init_rings.
3563  *
3564  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3565  * posting buffers we only dirty the first cache line of the RX
3566  * descriptor (containing the address).  Whereas for the RX status
3567  * buffers the cpu only reads the last cacheline of the RX descriptor
3568  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3569  */
3570 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3571                             int src_idx, u32 dest_idx_unmasked)
3572 {
3573         struct tg3_rx_buffer_desc *desc;
3574         struct ring_info *map, *src_map;
3575         struct sk_buff *skb;
3576         dma_addr_t mapping;
3577         int skb_size, dest_idx;
3578
3579         src_map = NULL;
3580         switch (opaque_key) {
3581         case RXD_OPAQUE_RING_STD:
3582                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3583                 desc = &tp->rx_std[dest_idx];
3584                 map = &tp->rx_std_buffers[dest_idx];
3585                 if (src_idx >= 0)
3586                         src_map = &tp->rx_std_buffers[src_idx];
3587                 skb_size = tp->rx_pkt_buf_sz;
3588                 break;
3589
3590         case RXD_OPAQUE_RING_JUMBO:
3591                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3592                 desc = &tp->rx_jumbo[dest_idx];
3593                 map = &tp->rx_jumbo_buffers[dest_idx];
3594                 if (src_idx >= 0)
3595                         src_map = &tp->rx_jumbo_buffers[src_idx];
3596                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3597                 break;
3598
3599         default:
3600                 return -EINVAL;
3601         };
3602
3603         /* Do not overwrite any of the map or rp information
3604          * until we are sure we can commit to a new buffer.
3605          *
3606          * Callers depend upon this behavior and assume that
3607          * we leave everything unchanged if we fail.
3608          */
3609         skb = netdev_alloc_skb(tp->dev, skb_size);
3610         if (skb == NULL)
3611                 return -ENOMEM;
3612
3613         skb_reserve(skb, tp->rx_offset);
3614
3615         mapping = pci_map_single(tp->pdev, skb->data,
3616                                  skb_size - tp->rx_offset,
3617                                  PCI_DMA_FROMDEVICE);
3618
3619         map->skb = skb;
3620         pci_unmap_addr_set(map, mapping, mapping);
3621
3622         if (src_map != NULL)
3623                 src_map->skb = NULL;
3624
3625         desc->addr_hi = ((u64)mapping >> 32);
3626         desc->addr_lo = ((u64)mapping & 0xffffffff);
3627
3628         return skb_size;
3629 }
3630
3631 /* We only need to move over in the address because the other
3632  * members of the RX descriptor are invariant.  See notes above
3633  * tg3_alloc_rx_skb for full details.
3634  */
3635 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3636                            int src_idx, u32 dest_idx_unmasked)
3637 {
3638         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3639         struct ring_info *src_map, *dest_map;
3640         int dest_idx;
3641
3642         switch (opaque_key) {
3643         case RXD_OPAQUE_RING_STD:
3644                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3645                 dest_desc = &tp->rx_std[dest_idx];
3646                 dest_map = &tp->rx_std_buffers[dest_idx];
3647                 src_desc = &tp->rx_std[src_idx];
3648                 src_map = &tp->rx_std_buffers[src_idx];
3649                 break;
3650
3651         case RXD_OPAQUE_RING_JUMBO:
3652                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3653                 dest_desc = &tp->rx_jumbo[dest_idx];
3654                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3655                 src_desc = &tp->rx_jumbo[src_idx];
3656                 src_map = &tp->rx_jumbo_buffers[src_idx];
3657                 break;
3658
3659         default:
3660                 return;
3661         };
3662
3663         dest_map->skb = src_map->skb;
3664         pci_unmap_addr_set(dest_map, mapping,
3665                            pci_unmap_addr(src_map, mapping));
3666         dest_desc->addr_hi = src_desc->addr_hi;
3667         dest_desc->addr_lo = src_desc->addr_lo;
3668
3669         src_map->skb = NULL;
3670 }
3671
#if TG3_VLAN_TAG_USED
/* Deliver a VLAN-tagged frame to the stack through the VLAN group. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3678
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies the producer ring and the
		 * slot this buffer was originally posted from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			/* Unknown ring cookie; skip without reposting. */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			/* Return the buffer to the producer ring. */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large frame: post a fresh buffer and pass the
			 * original skb up the stack without copying.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small frame: copy into a fresh skb and recycle
			 * the original DMA buffer back to the ring.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Accept the hardware checksum only when enabled and the
		 * chip reports the full-complement value 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically publish newly posted standard buffers so
		 * the chip does not run dry during long bursts.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Order the mailbox writes before any following MMIO. */
	mmiowb();

	return received;
}
3858
/* Do one round of NAPI work: PHY/link events, TX completions, and RX
 * processing, bounded by the NAPI budget.
 *
 * @tp:        device private state
 * @work_done: RX packets already processed this poll cycle
 * @budget:    total NAPI budget for this poll cycle
 *
 * Returns the updated work_done count.  Returns early (skipping RX) if
 * tg3_tx() raised TG3_FLAG_TX_RECOVERY_PENDING.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
        struct tg3_hw_status *sblk = tp->hw_status;

        /* handle link change and other phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG) {
                        /* Clear only the link-change bit; keep
                         * SD_STATUS_UPDATED set so other pending events
                         * remain visible.
                         */
                        sblk->status = SD_STATUS_UPDATED |
                                (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        tg3_setup_phy(tp, 0);
                        spin_unlock(&tp->lock);
                }
        }

        /* run TX completion thread */
        if (sblk->idx[0].tx_consumer != tp->tx_cons) {
                tg3_tx(tp);
                /* TX hit an error that requires a chip reset; bail out
                 * so tg3_poll() can hand off to the reset task.
                 */
                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
                        return work_done;
        }

        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with tg3->napi.poll()
         */
        if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_done += tg3_rx(tp, budget - work_done);

        return work_done;
}
3892
/* NAPI poll callback.  Repeatedly runs tg3_poll_work() until the budget
 * is exhausted or no work remains, then completes NAPI and re-enables
 * chip interrupts.  A pending TX recovery condition instead completes
 * NAPI and schedules the reset task.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
        struct tg3 *tp = container_of(napi, struct tg3, napi);
        int work_done = 0;
        struct tg3_hw_status *sblk = tp->hw_status;

        while (1) {
                work_done = tg3_poll_work(tp, work_done, budget);

                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
                        goto tx_recovery;

                if (unlikely(work_done >= budget))
                        break;

                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
                        /* tp->last_tag is used in tg3_restart_ints() below
                         * to tell the hw how much work has been processed,
                         * so we must read it before checking for more work.
                         */
                        tp->last_tag = sblk->status_tag;
                        rmb();
                } else
                        sblk->status &= ~SD_STATUS_UPDATED;

                if (likely(!tg3_has_work(tp))) {
                        netif_rx_complete(tp->dev, napi);
                        tg3_restart_ints(tp);
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        netif_rx_complete(tp->dev, napi);
        schedule_work(&tp->reset_task);
        return work_done;
}
3933
/* Stop the IRQ handler from scheduling new NAPI work and wait for any
 * handler instance already running to finish.  The handlers check
 * tg3_irq_sync() and bail out once irq_sync is set.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
        /* Nested quiesce is a driver bug. */
        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        /* Make irq_sync visible to other CPUs before waiting on any
         * in-flight handler.
         */
        smp_mb();

        synchronize_irq(tp->pdev->irq);
}
3943
/* Nonzero while interrupts are quiesced via tg3_irq_quiesce(); IRQ
 * handlers use this to avoid scheduling NAPI during a shutdown/reset.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
        return tp->irq_sync;
}
3948
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        /* _bh variant: TX and NAPI paths run in softirq context. */
        spin_lock_bh(&tp->lock);
        if (irq_sync)
                tg3_irq_quiesce(tp);
}
3960
/* Release the lock taken by tg3_full_lock().  Note this does not undo a
 * quiesce; callers clear tp->irq_sync themselves when re-enabling IRQs.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
3965
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);

        /* Warm the cache lines the poll loop will touch first. */
        prefetch(tp->hw_status);
        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

        /* Don't schedule NAPI while tg3_irq_quiesce() is in effect. */
        if (likely(!tg3_irq_sync(tp)))
                netif_rx_schedule(dev, &tp->napi);

        return IRQ_HANDLED;
}
3982
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);

        prefetch(tp->hw_status);
        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        /* NAPI re-enables irqs via tg3_restart_ints(); skip scheduling
         * while quiesced.
         */
        if (likely(!tg3_irq_sync(tp)))
                netif_rx_schedule(dev, &tp->napi);

        return IRQ_RETVAL(1);
}
4007
/* Legacy INTx interrupt handler for chips using the untagged status
 * block mode.  May share the IRQ line with other devices, so it first
 * determines whether the interrupt is ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        /* Consume the status update before checking for work. */
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tp))) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                netif_rx_schedule(dev, &tp->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
4056
/* Legacy INTx interrupt handler for chips using tagged status blocks:
 * a new status tag (rather than the SD_STATUS_UPDATED bit) indicates
 * pending work.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tp->last_tag)) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        if (netif_rx_schedule_prep(dev, &tp->napi)) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                /* Update last_tag to mark that this status has been
                 * seen. Because interrupt may be shared, we may be
                 * racing with tg3_poll(), so only update last_tag
                 * if tg3_poll() is not scheduled.
                 */
                tp->last_tag = sblk->status_tag;
                __netif_rx_schedule(dev, &tp->napi);
        }
out:
        return IRQ_RETVAL(handled);
}
4104
4105 /* ISR for interrupt test */
4106 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4107 {
4108         struct net_device *dev = dev_id;
4109         struct tg3 *tp = netdev_priv(dev);
4110         struct tg3_hw_status *sblk = tp->hw_status;
4111
4112         if ((sblk->status & SD_STATUS_UPDATED) ||
4113             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4114                 tg3_disable_ints(tp);
4115                 return IRQ_RETVAL(1);
4116         }
4117         return IRQ_RETVAL(0);
4118 }
4119
4120 static int tg3_init_hw(struct tg3 *, int);
4121 static int tg3_halt(struct tg3 *, int, int);
4122
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success or the tg3_init_hw() error.  On failure the chip
 * is halted and the interface closed; tp->lock is dropped and retaken
 * around dev_close(), hence the __releases/__acquires annotations.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
        __releases(tp->lock)
        __acquires(tp->lock)
{
        int err;

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
                       "aborting.\n", tp->dev->name);
                /* Put the chip into a quiesced state before tearing
                 * the interface down.
                 */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_full_unlock(tp);
                del_timer_sync(&tp->timer);
                /* Undo any quiesce so dev_close() sees a normal state. */
                tp->irq_sync = 0;
                napi_enable(&tp->napi);
                dev_close(tp->dev);
                tg3_full_lock(tp, 0);
        }
        return err;
}
4146
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: invoke the INTx interrupt handler directly so
 * netconsole/netpoll can make progress without a hardware interrupt.
 */
static void tg3_poll_controller(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        tg3_interrupt(tp->pdev->irq, dev);
}
#endif
4155
/* Workqueue handler that fully halts and reinitializes the chip.
 * Scheduled from tg3_tx_timeout() and the TX recovery path in
 * tg3_poll().
 */
static void tg3_reset_task(struct work_struct *work)
{
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        unsigned int restart_timer;

        tg3_full_lock(tp, 0);

        /* Nothing to do if the interface was brought down meanwhile. */
        if (!netif_running(tp->dev)) {
                tg3_full_unlock(tp);
                return;
        }

        tg3_full_unlock(tp);

        /* Stop TX/NAPI activity before re-taking the lock with IRQs
         * quiesced (irq_sync == 1).
         */
        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
        tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

        if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
                /* A TX hang occurred; fall back to flushed mailbox
                 * writes for the rest of this device's lifetime.
                 */
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
                tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
                tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
        }

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        if (tg3_init_hw(tp, 1))
                goto out;

        tg3_netif_start(tp);

        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);

out:
        tg3_full_unlock(tp);
}
4196
/* Log a minimal snapshot of MAC and DMA status registers to aid TX
 * timeout debugging.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
        printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
        printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
               tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4204
/* net_device watchdog callback: the TX queue stalled, so log some state
 * (if TX-error messages are enabled) and schedule a full chip reset.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_msg_tx_err(tp)) {
                printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
                       dev->name);
                tg3_dump_short_state(tp);
        }

        /* The actual reset runs from process context in tg3_reset_task(). */
        schedule_work(&tp->reset_task);
}
4217
4218 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4219 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4220 {
4221         u32 base = (u32) mapping & 0xffffffff;
4222
4223         return ((base > 0xffffdcc0) &&
4224                 (base + len + 8 < base));
4225 }
4226
4227 /* Test for DMA addresses > 40-bit */
4228 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4229                                           int len)
4230 {
4231 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4232         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4233                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4234         return 0;
4235 #else
4236         return 0;
4237 #endif
4238 }
4239
4240 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4241
/* Workaround 4GB and 40-bit hardware DMA bugs. */
/*
 * Re-linearize @skb into a freshly-allocated buffer whose DMA mapping
 * avoids the chip errata, re-issue it as a single descriptor at *start,
 * and unwind the descriptors/mappings previously queued for the old skb
 * in [*start, last_plus_one).
 *
 * Returns 0 on success, -1 if allocation or mapping fails (in which
 * case the packet is silently dropped).  The original skb is always
 * consumed.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
                                       u32 last_plus_one, u32 *start,
                                       u32 base_flags, u32 mss)
{
        struct sk_buff *new_skb;
        dma_addr_t new_addr = 0;
        u32 entry = *start;
        int i, ret = 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                new_skb = skb_copy(skb, GFP_ATOMIC);
        else {
                /* 5701 needs the data 4-byte aligned; pad the headroom
                 * accordingly when copying.
                 */
                int more_headroom = 4 - ((unsigned long)skb->data & 3);

                new_skb = skb_copy_expand(skb,
                                          skb_headroom(skb) + more_headroom,
                                          skb_tailroom(skb), GFP_ATOMIC);
        }

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                entry = *start;
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure new skb does not cross any 4G boundaries.
                 * Drop the packet if it does.
                 */
                if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
                        ret = -1;
                        dev_kfree_skb(new_skb);
                        new_skb = NULL;
                } else {
                        tg3_set_txd(tp, entry, new_addr, new_skb->len,
                                    base_flags, 1 | (mss << 1));
                        *start = NEXT_TX(entry);
                }
        }

        /* Now clean up the sw ring entries. */
        i = 0;
        while (entry != last_plus_one) {
                int len;

                /* Entry 0 held the linear data; the rest held frags. */
                if (i == 0)
                        len = skb_headlen(skb);
                else
                        len = skb_shinfo(skb)->frags[i-1].size;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
                                 len, PCI_DMA_TODEVICE);
                if (i == 0) {
                        /* First slot now owns the replacement skb (may be
                         * NULL on failure) and its mapping.
                         */
                        tp->tx_buffers[entry].skb = new_skb;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
                } else {
                        tp->tx_buffers[entry].skb = NULL;
                }
                entry = NEXT_TX(entry);
                i++;
        }

        dev_kfree_skb(skb);

        return ret;
}
4309
4310 static void tg3_set_txd(struct tg3 *tp, int entry,
4311                         dma_addr_t mapping, int len, u32 flags,
4312                         u32 mss_and_is_end)
4313 {
4314         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4315         int is_end = (mss_and_is_end & 0x1);
4316         u32 mss = (mss_and_is_end >> 1);
4317         u32 vlan_tag = 0;
4318
4319         if (is_end)
4320                 flags |= TXD_FLAG_END;
4321         if (flags & TXD_FLAG_VLAN) {
4322                 vlan_tag = flags >> 16;
4323                 flags &= 0xffff;
4324         }
4325         vlan_tag |= (mss << TXD_MSS_SHIFT);
4326
4327         txd->addr_hi = ((u64) mapping >> 32);
4328         txd->addr_lo = ((u64) mapping & 0xffffffff);
4329         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4330         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4331 }
4332
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
/*
 * Queue @skb for transmission: compute checksum/TSO/VLAN descriptor
 * flags, map the linear data and each page fragment, write the TX
 * descriptors, and ring the producer mailbox.
 *
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring is full (which
 * indicates a flow-control bug, since the queue should already have
 * been stopped).
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* We must own the header before editing it for TSO. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        /* For IPv6 the chip wants the full header length
                         * encoded in the upper mss bits.
                         */
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        struct iphdr *iph = ip_hdr(skb);

                        tcp_opt_len = tcp_optlen(skb);
                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Chip recomputes IP/TCP checksums per segment;
                         * prime tot_len for one full segment.
                         */
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                tcp_hdr(skb)->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        /* Only the first descriptor's slot keeps the skb pointer, so
         * tg3_tx() frees it exactly once.
         */
        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                /* Re-check after stopping: tg3_tx() may have freed space
                 * concurrently, in which case wake the queue again.
                 */
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4451
4452 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4453
4454 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4455  * TSO header is greater than 80 bytes.
4456  */
4457 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4458 {
4459         struct sk_buff *segs, *nskb;
4460
4461         /* Estimate the number of fragments in the worst case */
4462         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4463                 netif_stop_queue(tp->dev);
4464                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4465                         return NETDEV_TX_BUSY;
4466
4467                 netif_wake_queue(tp->dev);
4468         }
4469
4470         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4471         if (IS_ERR(segs))
4472                 goto tg3_tso_bug_end;
4473
4474         do {
4475                 nskb = segs;
4476                 segs = segs->next;
4477                 nskb->next = NULL;
4478                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4479         } while (segs);
4480
4481 tg3_tso_bug_end:
4482         dev_kfree_skb(skb);
4483
4484         return NETDEV_TX_OK;
4485 }
4486
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
/*
 * Like tg3_start_xmit(), but additionally tracks whether any mapped
 * buffer would trip the 4GB-crossing or 40-bit DMA errata; if so, the
 * queued descriptors are redone through the linearizing workaround in
 * tigon3_dma_hwbug_workaround().  Oversized TSO headers are diverted to
 * the software-GSO path in tg3_tso_bug().
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;
        int would_hit_hwbug;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                struct iphdr *iph;
                int tcp_opt_len, ip_tcp_len, hdr_len;

                /* We must own the header before editing it for TSO. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = tcp_optlen(skb);
                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                /* Headers over 80 bytes trigger a chip TSO bug on some
                 * parts; fall back to software GSO there.
                 */
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
                        return (tg3_tso_bug(tp, skb));

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                /* Prime IP header for per-segment checksum recompute. */
                iph = ip_hdr(skb);
                iph->check = 0;
                iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        /* Firmware TSO needs a pseudo-header checksum. */
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* Encode IP/TCP option lengths; the bit position differs
                 * between HW-TSO/5705 chips and the rest.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        would_hit_hwbug = 0;

        if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
                would_hit_hwbug = 1;
        else if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        /* Any erratum-tripping fragment forces the whole
                         * packet through the workaround path below.
                         */
                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        if (tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;

                /* Rewind to the first descriptor of this packet. */
                start = entry - 1 - skb_shinfo(skb)->nr_frags;
                start &= (TG3_TX_RING_SIZE - 1);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;

                entry = start;
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                /* Re-check after stopping; TX reclaim may have freed
                 * enough space to wake the queue again.
                 */
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4662
4663 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4664                                int new_mtu)
4665 {
4666         dev->mtu = new_mtu;
4667
4668         if (new_mtu > ETH_DATA_LEN) {
4669                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4670                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4671                         ethtool_op_set_tso(dev, 0);
4672                 }
4673                 else
4674                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4675         } else {
4676                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4677                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4678                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4679         }
4680 }
4681
4682 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4683 {
4684         struct tg3 *tp = netdev_priv(dev);
4685         int err;
4686
4687         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4688                 return -EINVAL;
4689
4690         if (!netif_running(dev)) {
4691                 /* We'll just catch it later when the
4692                  * device is up'd.
4693                  */
4694                 tg3_set_mtu(dev, tp, new_mtu);
4695                 return 0;
4696         }
4697
4698         tg3_netif_stop(tp);
4699
4700         tg3_full_lock(tp, 1);
4701
4702         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4703
4704         tg3_set_mtu(dev, tp, new_mtu);
4705
4706         err = tg3_restart_hw(tp, 0);
4707
4708         if (!err)
4709                 tg3_netif_start(tp);
4710
4711         tg3_full_unlock(tp);
4712
4713         return err;
4714 }
4715
4716 /* Free up pending packets in all rx/tx rings.
4717  *
4718  * The chip has been shut down and the driver detached from
4719  * the networking, so no interrupts or new tx packets will
4720  * end up in the driver.  tp->{tx,}lock is not held and we are not
4721  * in an interrupt context and thus may sleep.
4722  */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard RX ring: unmap the DMA buffer and free any skb that
	 * is still posted.  The unmap length mirrors the map length
	 * used at allocation time (buffer size minus the RX offset).
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo RX ring: same as above, but with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: each queued skb occupies one descriptor for the
	 * linear head plus one per page fragment, so the index is
	 * advanced manually rather than by the for statement.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* The linear head was mapped with pci_map_single(). */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			/* Fragment descriptors may wrap past the end of
			 * the ring, hence the index mask.
			 */
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		/* Free the skb only after all of its mappings are gone. */
		dev_kfree_skb_any(skb);
	}
}
4787
4788 /* Initialize tx/rx rings for packet processing.
4789  *
4790  * The chip has been shut down and the driver detached from
4791  * the networking, so no interrupts or new tx packets will
4792  * end up in the driver.  tp->{tx,}lock are held and thus
4793  * we may not sleep.
4794  */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use oversized standard buffers instead of a
	 * separate jumbo ring when the MTU exceeds the ethernet default.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* The jumbo ring descriptors additionally carry RXD_FLAG_JUMBO. */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  On a partial
	 * failure we fall back to a smaller ring; only a completely
	 * failed first allocation is fatal.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			/* NOTE(review): unlike the jumbo path below, this
			 * error return does not call tg3_free_rings() —
			 * presumably the caller cleans up; confirm.
			 */
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4877
4878 /*
4879  * Must not be invoked with interrupt sources disabled and
4880  * the hardware shutdown down.
4881  */
4882 static void tg3_free_consistent(struct tg3 *tp)
4883 {
4884         kfree(tp->rx_std_buffers);
4885         tp->rx_std_buffers = NULL;
4886         if (tp->rx_std) {
4887                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4888                                     tp->rx_std, tp->rx_std_mapping);
4889                 tp->rx_std = NULL;
4890         }
4891         if (tp->rx_jumbo) {
4892                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4893                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4894                 tp->rx_jumbo = NULL;
4895         }
4896         if (tp->rx_rcb) {
4897                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4898                                     tp->rx_rcb, tp->rx_rcb_mapping);
4899                 tp->rx_rcb = NULL;
4900         }
4901         if (tp->tx_ring) {
4902                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4903                         tp->tx_ring, tp->tx_desc_mapping);
4904                 tp->tx_ring = NULL;
4905         }
4906         if (tp->hw_status) {
4907                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4908                                     tp->hw_status, tp->status_mapping);
4909                 tp->hw_status = NULL;
4910         }
4911         if (tp->hw_stats) {
4912                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4913                                     tp->hw_stats, tp->stats_mapping);
4914                 tp->hw_stats = NULL;
4915         }
4916 }
4917
4918 /*
4919  * Must not be invoked with interrupt sources disabled and
4920  * the hardware shutdown down.  Can sleep.
4921  */
4922 static int tg3_alloc_consistent(struct tg3 *tp)
4923 {
4924         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4925                                       (TG3_RX_RING_SIZE +
4926                                        TG3_RX_JUMBO_RING_SIZE)) +
4927                                      (sizeof(struct tx_ring_info) *
4928                                       TG3_TX_RING_SIZE),
4929                                      GFP_KERNEL);
4930         if (!tp->rx_std_buffers)
4931                 return -ENOMEM;
4932
4933         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4934         tp->tx_buffers = (struct tx_ring_info *)
4935                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4936
4937         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4938                                           &tp->rx_std_mapping);
4939         if (!tp->rx_std)
4940                 goto err_out;
4941
4942         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4943                                             &tp->rx_jumbo_mapping);
4944
4945         if (!tp->rx_jumbo)
4946                 goto err_out;
4947
4948         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4949                                           &tp->rx_rcb_mapping);
4950         if (!tp->rx_rcb)
4951                 goto err_out;
4952
4953         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4954                                            &tp->tx_desc_mapping);
4955         if (!tp->tx_ring)
4956                 goto err_out;
4957
4958         tp->hw_status = pci_alloc_consistent(tp->pdev,
4959                                              TG3_HW_STATUS_SIZE,
4960                                              &tp->status_mapping);
4961         if (!tp->hw_status)
4962                 goto err_out;
4963
4964         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4965                                             sizeof(struct tg3_hw_stats),
4966                                             &tp->stats_mapping);
4967         if (!tp->hw_stats)
4968                 goto err_out;
4969
4970         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4971         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4972
4973         return 0;
4974
4975 err_out:
4976         tg3_free_consistent(tp);
4977         return -ENOMEM;
4978 }
4979
4980 #define MAX_WAIT_CNT 1000
4981
4982 /* To stop a block, clear the enable bit and poll till it
4983  * clears.  tp->lock is held.
4984  */
4985 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4986 {
4987         unsigned int i;
4988         u32 val;
4989
4990         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4991                 switch (ofs) {
4992                 case RCVLSC_MODE:
4993                 case DMAC_MODE:
4994                 case MBFREE_MODE:
4995                 case BUFMGR_MODE:
4996                 case MEMARB_MODE:
4997                         /* We can't enable/disable these bits of the
4998                          * 5705/5750, just say success.
4999                          */
5000                         return 0;
5001
5002                 default:
5003                         break;
5004                 };
5005         }
5006
5007         val = tr32(ofs);
5008         val &= ~enable_bit;
5009         tw32_f(ofs, val);
5010
5011         for (i = 0; i < MAX_WAIT_CNT; i++) {
5012                 udelay(100);
5013                 val = tr32(ofs);
5014                 if ((val & enable_bit) == 0)
5015                         break;
5016         }
5017
5018         if (i == MAX_WAIT_CNT && !silent) {
5019                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5020                        "ofs=%lx enable_bit=%x\n",
5021                        ofs, enable_bit);
5022                 return -ENODEV;
5023         }
5024
5025         return 0;
5026 }
5027
5028 /* tp->lock is held. */
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new RX traffic before shutting down the
	 * receive-side blocks.
	 */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Errors from the individual block stops are OR-accumulated so
	 * the whole sequence always runs; the stop order below follows
	 * the data path (RX blocks, then TX blocks).
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* MAC TX mode has no tg3_stop_block() helper; poll it directly
	 * with the same MAX_WAIT_CNT * 100us budget.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Host coalescing and memory-side blocks go down last. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset register. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the host-visible status and statistics blocks so stale
	 * data is not consumed after the restart.
	 */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
5090
5091 /* tp->lock is held. */
5092 static int tg3_nvram_lock(struct tg3 *tp)
5093 {
5094         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5095                 int i;
5096
5097                 if (tp->nvram_lock_cnt == 0) {
5098                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5099                         for (i = 0; i < 8000; i++) {
5100                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5101                                         break;
5102                                 udelay(20);
5103                         }
5104                         if (i == 8000) {
5105                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5106                                 return -ENODEV;
5107                         }
5108                 }
5109                 tp->nvram_lock_cnt++;
5110         }
5111         return 0;
5112 }
5113
5114 /* tp->lock is held. */
5115 static void tg3_nvram_unlock(struct tg3 *tp)
5116 {
5117         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5118                 if (tp->nvram_lock_cnt > 0)
5119                         tp->nvram_lock_cnt--;
5120                 if (tp->nvram_lock_cnt == 0)
5121                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5122         }
5123 }
5124
5125 /* tp->lock is held. */
5126 static void tg3_enable_nvram_access(struct tg3 *tp)
5127 {
5128         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5129             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5130                 u32 nvaccess = tr32(NVRAM_ACCESS);
5131
5132                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5133         }
5134 }
5135
5136 /* tp->lock is held. */
5137 static void tg3_disable_nvram_access(struct tg3 *tp)
5138 {
5139         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5140             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5141                 u32 nvaccess = tr32(NVRAM_ACCESS);
5142
5143                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5144         }
5145 }
5146
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* Bail out unless the APE firmware segment is present and ready. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (apedata != APE_FW_STATUS_READY)
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Only post the new event while holding the memory lock
		 * and only when no previous event is still pending.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the APE doorbell only if our event was actually posted
	 * (i.e. the pending bit was observed clear before the write).
	 */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5182
5183 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5184 {
5185         u32 event;
5186         u32 apedata;
5187
5188         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5189                 return;
5190
5191         switch (kind) {
5192                 case RESET_KIND_INIT:
5193                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5194                                         APE_HOST_SEG_SIG_MAGIC);
5195                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5196                                         APE_HOST_SEG_LEN_MAGIC);
5197                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5198                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5199                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5200                                         APE_HOST_DRIVER_ID_MAGIC);
5201                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5202                                         APE_HOST_BEHAV_NO_PHYLOCK);
5203
5204                         event = APE_EVENT_STATUS_STATE_START;
5205                         break;
5206                 case RESET_KIND_SHUTDOWN:
5207                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5208                         break;
5209                 case RESET_KIND_SUSPEND:
5210                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5211                         break;
5212                 default:
5213                         return;
5214         }
5215
5216         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5217
5218         tg3_ape_send_event(tp, event);
5219 }
5220
5221 /* tp->lock is held. */
5222 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5223 {
5224         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5225                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5226
5227         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5228                 switch (kind) {
5229                 case RESET_KIND_INIT:
5230                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5231                                       DRV_STATE_START);
5232                         break;
5233
5234                 case RESET_KIND_SHUTDOWN:
5235                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5236                                       DRV_STATE_UNLOAD);
5237                         break;
5238
5239                 case RESET_KIND_SUSPEND:
5240                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5241                                       DRV_STATE_SUSPEND);
5242                         break;
5243
5244                 default:
5245                         break;
5246                 };
5247         }
5248
5249         if (kind == RESET_KIND_INIT ||
5250             kind == RESET_KIND_SUSPEND)
5251                 tg3_ape_driver_state_change(tp, kind);
5252 }
5253
5254 /* tp->lock is held. */
5255 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5256 {
5257         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5258                 switch (kind) {
5259                 case RESET_KIND_INIT:
5260                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5261                                       DRV_STATE_START_DONE);
5262                         break;
5263
5264                 case RESET_KIND_SHUTDOWN:
5265                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5266                                       DRV_STATE_UNLOAD_DONE);
5267                         break;
5268
5269                 default:
5270                         break;
5271                 };
5272         }
5273
5274         if (kind == RESET_KIND_SHUTDOWN)
5275                 tg3_ape_driver_state_change(tp, kind);
5276 }
5277
5278 /* tp->lock is held. */
5279 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5280 {
5281         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5282                 switch (kind) {
5283                 case RESET_KIND_INIT:
5284                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5285                                       DRV_STATE_START);
5286                         break;
5287
5288                 case RESET_KIND_SHUTDOWN:
5289                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5290                                       DRV_STATE_UNLOAD);
5291                         break;
5292
5293                 case RESET_KIND_SUSPEND:
5294                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5295                                       DRV_STATE_SUSPEND);
5296                         break;
5297
5298                 default:
5299                         break;
5300                 };
5301         }
5302 }
5303
5304 static int tg3_poll_fw(struct tg3 *tp)
5305 {
5306         int i;
5307         u32 val;
5308
5309         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5310                 /* Wait up to 20ms for init done. */
5311                 for (i = 0; i < 200; i++) {
5312                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5313                                 return 0;
5314                         udelay(100);
5315                 }
5316                 return -ENODEV;
5317         }
5318
5319         /* Wait for firmware initialization to complete. */
5320         for (i = 0; i < 100000; i++) {
5321                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5322                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5323                         break;
5324                 udelay(10);
5325         }
5326
5327         /* Chip might not be fitted with firmware.  Some Sun onboard
5328          * parts are configured like that.  So don't signal the timeout
5329          * of the above loop as an error, but do report the lack of
5330          * running firmware once.
5331          */
5332         if (i >= 100000 &&
5333             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5334                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5335
5336                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5337                        tp->dev->name);
5338         }
5339
5340         return 0;
5341 }
5342
5343 /* Save PCI command register before chip reset */
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* Only PCI_COMMAND is snapshotted here; the other values that
	 * tg3_restore_pci_state() rewrites (misc_host_ctrl, cacheline
	 * size, latency timer) are presumably cached at probe time —
	 * confirm against the setup code.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5348
5349 /* Restore PCI state after chip reset */
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command register saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* PCIe parts get their read-request size set; conventional PCI
	 * parts get cacheline size and latency timer restored instead.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		pcie_set_readrq(tp->pdev, 4096);
	else {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			/* Re-enable MSI mode in the chip as well. */
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5410
5411 static void tg3_stop_fw(struct tg3 *);
5412
/* tp->lock is held.
 *
 * Perform a GRC core-clock reset of the chip and bring it back to a
 * programmable state.  The sequence below is strictly order-dependent
 * (posted-write flushes, fixed delays, chip-revision workarounds);
 * do not reorder statements.  Returns 0 on success, or the negative
 * errno from tg3_poll_fw() if the bootcode never signals completion.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int err;

        tg3_nvram_lock(tp);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
        if (tp->hw_status) {
                tp->hw_status->status = 0;
                tp->hw_status->status_tag = 0;
        }
        tp->last_tag = 0;
        smp_mb();
        synchronize_irq(tp->pdev->irq);

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* NOTE(review): 0x7e2c is an undocumented PCIe tuning
                 * register; values 0x60/0x20 come from vendor code.
                 */
                if (tr32(0x7e2c) == 0x60) {
                        tw32(0x7e2c, 0x20);
                }
                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* 5906: signal a driver reset to the VCPU and release it. */
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int i;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (i = 0; i < 5000; i++)
                                udelay(100);

                        /* NOTE(review): cfg reg 0xc4 bit 15 — vendor
                         * PCIe workaround, meaning not documented here.
                         */
                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }
                /* Set PCIE max payload size and clear error status.  */
                pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
        }

        tg3_restore_pci_state(tp);

        /* PCI/MSI state is restored; the irq handler may touch the
         * device again.
         */
        tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

        val = 0;
        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                /* NOTE(review): reg 0xc4 bit 15 — 5705 A0 workaround. */
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Re-select the MAC port mode for the PHY type in use. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else
                tw32_f(MAC_MODE, 0);
        udelay(40);

        /* Wait for bootcode to finish; propagate its timeout error. */
        err = tg3_poll_fw(tp);
        if (err)
                return err;

        if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                /* NOTE(review): reg 0x7c00 bit 25 — undocumented PCIe
                 * post-reset fixup from vendor code.
                 */
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        /* Reprobe ASF enable state.  */
        tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
        tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
                        /* Restart the firmware-event pacing clock. */
                        tp->last_event_jiffies = jiffies;
                        if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                                tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
                }
        }

        return 0;
}
5602
5603 /* tp->lock is held. */
5604 static void tg3_stop_fw(struct tg3 *tp)
5605 {
5606         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5607            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5608                 /* Wait for RX cpu to ACK the previous event. */
5609                 tg3_wait_for_event_ack(tp);
5610
5611                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5612
5613                 tg3_generate_fw_event(tp);
5614
5615                 /* Wait for RX cpu to ACK this event. */
5616                 tg3_wait_for_event_ack(tp);
5617         }
5618 }
5619
/* tp->lock is held.
 *
 * Stop the firmware, signal the reset to the bootcode, abort all
 * hardware activity and reset the chip.  Returns the result of
 * tg3_chip_reset(); the post-reset signatures are written even when
 * the reset itself failed.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
        int err;

        tg3_stop_fw(tp);
        tg3_write_sig_pre_reset(tp, kind);
        tg3_abort_hw(tp, silent);

        err = tg3_chip_reset(tp);

        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        return err;
}
5640
/* Link-time layout of the 5701 A0 workaround firmware image below.
 * The *_ADDR values are MIPS link addresses; the loader uses only
 * their low 16 bits as offsets into the cpu scratch window.
 * (TG3_FW_RELASE_MINOR: "RELASE" is a historical misspelling, kept
 * as-is so any external references keep compiling.)
 */
#define TG3_FW_RELEASE_MAJOR    0x0
#define TG3_FW_RELASE_MINOR     0x0
#define TG3_FW_RELEASE_FIX      0x0
#define TG3_FW_START_ADDR       0x08000000
#define TG3_FW_TEXT_ADDR        0x08000000
#define TG3_FW_TEXT_LEN         0x9c0
#define TG3_FW_RODATA_ADDR      0x080009c0
#define TG3_FW_RODATA_LEN       0x60
#define TG3_FW_DATA_ADDR        0x08000a40
#define TG3_FW_DATA_LEN         0x20
#define TG3_FW_SBSS_ADDR        0x08000a60
#define TG3_FW_SBSS_LEN         0xc
#define TG3_FW_BSS_ADDR         0x08000a70
#define TG3_FW_BSS_LEN          0x10
5655
/* .text section of the 5701 A0 workaround firmware (MIPS machine code),
 * loaded into the RX cpu by tg3_load_5701_a0_firmware_fix().
 * Opaque vendor data — do not edit by hand.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
        0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
        0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
        0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
        0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
        0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
        0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
        0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
        0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
        0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
        0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
        0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
        0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
        0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
        0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
        0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
        0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
        0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
        0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
        0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
        0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
        0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
        0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
        0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
        0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
        0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
        0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
        0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
        0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
        0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
        0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
        0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
        0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
        0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
        0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
        0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
        0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
        0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
        0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
        0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
        0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
        0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
        0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
        0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
        0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
        0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
        0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
        0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
        0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
        0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
        0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
        0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
        0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
        0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
        0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
        0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
        0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
        0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
        0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
        0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
        0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
        0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
        0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
        0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
        0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5749
/* .rodata section of the 5701 A0 workaround firmware (mostly ASCII
 * message strings used by the firmware).  Opaque vendor data.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
        0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
        0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
        0x00000000
};
5757
/* The .data section is all zeros, so the loader is passed a NULL
 * pointer instead (see tg3_load_5701_a0_firmware_fix()); this array is
 * kept only for reference.
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5764
/* Base address and size of the per-cpu scratch memory windows that
 * firmware images are loaded into (16 KiB each).
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
5769
5770 /* tp->lock is held. */
5771 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5772 {
5773         int i;
5774
5775         BUG_ON(offset == TX_CPU_BASE &&
5776             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5777
5778         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5779                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5780
5781                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5782                 return 0;
5783         }
5784         if (offset == RX_CPU_BASE) {
5785                 for (i = 0; i < 10000; i++) {
5786                         tw32(offset + CPU_STATE, 0xffffffff);
5787                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5788                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5789                                 break;
5790                 }
5791
5792                 tw32(offset + CPU_STATE, 0xffffffff);
5793                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5794                 udelay(10);
5795         } else {
5796                 for (i = 0; i < 10000; i++) {
5797                         tw32(offset + CPU_STATE, 0xffffffff);
5798                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5799                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5800                                 break;
5801                 }
5802         }
5803
5804         if (i >= 10000) {
5805                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5806                        "and %s CPU\n",
5807                        tp->dev->name,
5808                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5809                 return -ENODEV;
5810         }
5811
5812         /* Clear firmware's nvram arbitration. */
5813         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5814                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5815         return 0;
5816 }
5817
/* Describes one firmware image (text/rodata/data sections) to be copied
 * into a cpu scratch area by tg3_load_firmware_cpu().  The *_base
 * fields are link-time addresses; the loader uses only their low 16
 * bits as offsets into the scratch window.  A NULL *_data pointer means
 * the section is written as all zeros.
 */
struct fw_info {
        unsigned int text_base;         /* link address of .text */
        unsigned int text_len;          /* .text length in bytes */
        const u32 *text_data;           /* .text words (NULL => zeros) */
        unsigned int rodata_base;       /* link address of .rodata */
        unsigned int rodata_len;        /* .rodata length in bytes */
        const u32 *rodata_data;         /* .rodata words (NULL => zeros) */
        unsigned int data_base;         /* link address of .data */
        unsigned int data_len;          /* .data length in bytes */
        const u32 *data_data;           /* .data words (NULL => zeros) */
};
5829
5830 /* tp->lock is held. */
5831 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5832                                  int cpu_scratch_size, struct fw_info *info)
5833 {
5834         int err, lock_err, i;
5835         void (*write_op)(struct tg3 *, u32, u32);
5836
5837         if (cpu_base == TX_CPU_BASE &&
5838             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5839                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5840                        "TX cpu firmware on %s which is 5705.\n",
5841                        tp->dev->name);
5842                 return -EINVAL;
5843         }
5844
5845         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5846                 write_op = tg3_write_mem;
5847         else
5848                 write_op = tg3_write_indirect_reg32;
5849
5850         /* It is possible that bootcode is still loading at this point.
5851          * Get the nvram lock first before halting the cpu.
5852          */
5853         lock_err = tg3_nvram_lock(tp);
5854         err = tg3_halt_cpu(tp, cpu_base);
5855         if (!lock_err)
5856                 tg3_nvram_unlock(tp);
5857         if (err)
5858                 goto out;
5859
5860         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5861                 write_op(tp, cpu_scratch_base + i, 0);
5862         tw32(cpu_base + CPU_STATE, 0xffffffff);
5863         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5864         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5865                 write_op(tp, (cpu_scratch_base +
5866                               (info->text_base & 0xffff) +
5867                               (i * sizeof(u32))),
5868                          (info->text_data ?
5869                           info->text_data[i] : 0));
5870         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5871                 write_op(tp, (cpu_scratch_base +
5872                               (info->rodata_base & 0xffff) +
5873                               (i * sizeof(u32))),
5874                          (info->rodata_data ?
5875                           info->rodata_data[i] : 0));
5876         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5877                 write_op(tp, (cpu_scratch_base +
5878                               (info->data_base & 0xffff) +
5879                               (i * sizeof(u32))),
5880                          (info->data_data ?
5881                           info->data_data[i] : 0));
5882
5883         err = 0;
5884
5885 out:
5886         return err;
5887 }
5888
/* tp->lock is held.
 *
 * Load the 5701 A0 workaround firmware into both the RX and TX cpu
 * scratch areas, then start only the RX cpu.  Returns 0 on success,
 * an error from tg3_load_firmware_cpu(), or -ENODEV if the RX cpu
 * refuses to accept the new program counter.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
        struct fw_info info;
        int err, i;

        info.text_base = TG3_FW_TEXT_ADDR;
        info.text_len = TG3_FW_TEXT_LEN;
        info.text_data = &tg3FwText[0];
        info.rodata_base = TG3_FW_RODATA_ADDR;
        info.rodata_len = TG3_FW_RODATA_LEN;
        info.rodata_data = &tg3FwRodata[0];
        info.data_base = TG3_FW_DATA_ADDR;
        info.data_len = TG3_FW_DATA_LEN;
        info.data_data = NULL;  /* .data section is all zeros */

        err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
                                    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
                                    &info);
        if (err)
                return err;

        err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
                                    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
                                    &info);
        if (err)
                return err;

        /* Now startup only the RX cpu. */
        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);

        /* Verify the PC took effect; retry the halt + set-PC sequence
         * a few times if it did not.
         */
        for (i = 0; i < 5; i++) {
                if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
                        break;
                tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
                tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
                udelay(1000);
        }
        if (i >= 5) {
                printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
                       "to set RX CPU PC, is %08x should be %08x\n",
                       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
                       TG3_FW_TEXT_ADDR);
                return -ENODEV;
        }
        /* Release the RX cpu from halt; it begins executing the image. */
        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

        return 0;
}
5941
5942
/* Link-time layout of the TSO firmware image below (v1.6).
 * (TG3_TSO_FW_RELASE_MINOR: "RELASE" is a historical misspelling, kept
 * as-is so any external references keep compiling.)
 */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
5957
5958 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5959         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5960         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5961         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5962         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5963         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5964         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5965         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5966         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5967         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5968         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5969         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5970         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5971         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5972         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5973         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5974         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5975         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5976         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5977         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5978         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5979         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5980         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5981         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5982         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5983         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5984         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5985         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5986         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5987         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5988         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5989         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5990         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5991         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5992         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5993         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5994         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5995         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5996         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5997         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5998         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5999         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6000         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6001         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6002         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6003         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6004         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6005         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6006         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6007         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6008         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6009         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6010         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6011         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6012         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6013         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6014         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6015         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6016         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6017         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6018         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6019         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6020         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6021         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6022         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6023         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6024         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6025         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6026         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6027         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6028         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6029         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6030         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6031         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6032         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6033         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6034         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6035         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6036         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6037         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6038         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6039         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6040         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6041         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6042         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6043         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6044         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6045         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6046         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6047         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6048         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6049         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6050         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6051         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6052         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6053         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6054         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6055         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6056         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6057         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6058         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6059         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6060         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6061         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6062         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6063         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6064         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6065         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6066         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6067         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6068         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6069         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6070         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6071         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6072         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6073         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6074         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6075         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6076         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6077         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6078         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6079         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6080         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6081         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6082         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6083         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6084         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6085         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6086         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6087         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6088         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6089         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6090         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6091         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6092         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6093         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6094         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6095         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6096         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6097         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6098         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6099         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6100         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6101         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6102         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6103         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6104         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6105         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6106         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6107         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6108         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6109         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6110         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6111         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6112         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6113         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6114         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6115         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6116         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6117         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6118         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6119         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6120         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6121         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6122         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6123         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6124         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6125         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6126         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6127         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6128         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6129         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6130         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6131         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6132         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6133         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6134         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6135         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6136         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6137         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6138         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6139         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6140         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6141         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6142         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6143         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6144         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6145         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6146         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6147         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6148         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6149         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6150         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6151         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6152         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6153         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6154         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6155         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6156         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6157         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6158         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6159         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6160         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6161         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6162         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6163         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6164         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6165         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6166         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6167         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6168         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6169         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6170         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6171         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6172         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6173         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6174         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6175         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6176         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6177         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6178         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6179         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6180         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6181         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6182         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6183         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6184         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6185         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6186         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6187         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6188         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6189         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6190         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6191         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6192         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6193         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6194         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6195         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6196         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6197         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6198         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6199         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6200         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6201         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6202         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6203         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6204         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6205         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6206         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6207         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6208         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6209         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6210         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6211         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6212         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6213         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6214         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6215         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6216         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6217         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6218         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6219         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6220         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6221         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6222         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6223         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6224         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6225         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6226         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6227         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6228         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6229         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6230         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6231         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6232         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6233         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6234         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6235         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6236         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6237         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6238         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6239         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6240         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6241         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6242         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6243 };
6244
6245 static const u32 tg3TsoFwRodata[] = {
6246         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6247         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6248         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6249         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6250         0x00000000,
6251 };
6252
6253 static const u32 tg3TsoFwData[] = {
6254         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6255         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6256         0x00000000,
6257 };
6258
/* 5705 needs a special version of the TSO firmware.
 *
 * The *_ADDR/*_LEN values below describe where each section of that
 * firmware image is placed in NIC memory and must stay in sync with
 * the tg3Tso5Fw{Text,Rodata,Data} arrays that follow (the arrays are
 * sized from the *_LEN macros).
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
/* "RELASE" is a historical misspelling; the macro is kept under its
 * original name so existing references keep compiling, with a
 * correctly spelled alias added alongside. */
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_MINOR	TG3_TSO5_FW_RELASE_MINOR
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90	/* == TEXT_ADDR + TEXT_LEN */
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
6274
/* Instruction (.text) image of the 5705-specific TSO firmware,
 * loaded into NIC memory at TG3_TSO5_FW_TEXT_ADDR.  These are raw
 * 32-bit machine-code words (the patterns look like MIPS encodings,
 * e.g. 0x03e00008 / 0x27bdffe0 — presumably a MIPS CPU core on the
 * NIC; NOTE(review): confirm against Broadcom documentation).  Do
 * NOT edit the values by hand: the array must match the section
 * lengths and version tag declared above, and any change corrupts
 * the firmware.  Array is sized (TEXT_LEN / 4) + 1 to guarantee a
 * trailing padding word. */
6275 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6276         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6277         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6278         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6279         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6280         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6281         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6282         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6283         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6284         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6285         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6286         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6287         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6288         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6289         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6290         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6291         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6292         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6293         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6294         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6295         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6296         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6297         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6298         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6299         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6300         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6301         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6302         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6303         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6304         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6305         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6306         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6307         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6308         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6309         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6310         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6311         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6312         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6313         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6314         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6315         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6316         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6317         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6318         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6319         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6320         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6321         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6322         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6323         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6324         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6325         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6326         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6327         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6328         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6329         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6330         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6331         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6332         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6333         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6334         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6335         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6336         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6337         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6338         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6339         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6340         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6341         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6342         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6343         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6344         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6345         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6346         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6347         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6348         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6349         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6350         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6351         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6352         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6353         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6354         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6355         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6356         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6357         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6358         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6359         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6360         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6361         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6362         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6363         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6364         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6365         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6366         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6367         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6368         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6369         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6370         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6371         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6372         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6373         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6374         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6375         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6376         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6377         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6378         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6379         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6380         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6381         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6382         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6383         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6384         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6385         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6386         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6387         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6388         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6389         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6390         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6391         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6392         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6393         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6394         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6395         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6396         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6397         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6398         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6399         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6400         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6401         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6402         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6403         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6404         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6405         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6406         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6407         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6408         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6409         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6410         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6411         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6412         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6413         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6414         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6415         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6416         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6417         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6418         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6419         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6420         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6421         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6422         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6423         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6424         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6425         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6426         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6427         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6428         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6429         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6430         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6431         0x00000000, 0x00000000, 0x00000000,
6432 };
6433
/* Read-only data segment of the 5705 TSO firmware image.  The words are
 * ASCII text used by the firmware ("MainCpuB", "MainCpuA", "stkoffld",
 * "fatalErr"); treat as opaque firmware data and do not edit.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
        0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x00000000,
};
6440
/* Initialized data segment of the 5705 TSO firmware image.  Contains the
 * firmware identification string "stkoffld_v1.2.0"; treat as opaque
 * firmware data and do not edit.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000,
};
6445
/* tp->lock is held. */
/* Load the software TSO firmware into the appropriate on-chip CPU and
 * start it executing.  Chips with hardware TSO need no firmware and
 * return immediately.  Returns 0 on success, -ENODEV if the CPU never
 * begins executing at the firmware entry point, or the error from
 * tg3_load_firmware_cpu().
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
        struct fw_info info;
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err, i;

        /* Hardware-TSO chips segment in silicon; no firmware required. */
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                return 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                /* 5705: firmware runs on the RX CPU, and its scratch area
                 * is carved out of the MBUF pool SRAM, sized to hold the
                 * whole image (text + rodata + data + sbss + bss).
                 */
                info.text_base = TG3_TSO5_FW_TEXT_ADDR;
                info.text_len = TG3_TSO5_FW_TEXT_LEN;
                info.text_data = &tg3Tso5FwText[0];
                info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
                info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
                info.rodata_data = &tg3Tso5FwRodata[0];
                info.data_base = TG3_TSO5_FW_DATA_ADDR;
                info.data_len = TG3_TSO5_FW_DATA_LEN;
                info.data_data = &tg3Tso5FwData[0];
                cpu_base = RX_CPU_BASE;
                cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
                cpu_scratch_size = (info.text_len +
                                    info.rodata_len +
                                    info.data_len +
                                    TG3_TSO5_FW_SBSS_LEN +
                                    TG3_TSO5_FW_BSS_LEN);
        } else {
                /* All other non-HW-TSO chips: firmware runs on the TX CPU
                 * using its dedicated scratch SRAM.
                 */
                info.text_base = TG3_TSO_FW_TEXT_ADDR;
                info.text_len = TG3_TSO_FW_TEXT_LEN;
                info.text_data = &tg3TsoFwText[0];
                info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
                info.rodata_len = TG3_TSO_FW_RODATA_LEN;
                info.rodata_data = &tg3TsoFwRodata[0];
                info.data_base = TG3_TSO_FW_DATA_ADDR;
                info.data_len = TG3_TSO_FW_DATA_LEN;
                info.data_data = &tg3TsoFwData[0];
                cpu_base = TX_CPU_BASE;
                cpu_scratch_base = TX_CPU_SCRATCH_BASE;
                cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
        }

        err = tg3_load_firmware_cpu(tp, cpu_base,
                                    cpu_scratch_base, cpu_scratch_size,
                                    &info);
        if (err)
                return err;

        /* Now startup the cpu. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_PC,    info.text_base);

        /* Give the CPU up to five tries (1 ms apart), re-halting and
         * re-writing the program counter each time, until it reports the
         * firmware entry point.
         */
        for (i = 0; i < 5; i++) {
                if (tr32(cpu_base + CPU_PC) == info.text_base)
                        break;
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(cpu_base + CPU_PC,    info.text_base);
                udelay(1000);
        }
        if (i >= 5) {
                printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
                       "to set CPU PC, is %08x should be %08x\n",
                       tp->dev->name, tr32(cpu_base + CPU_PC),
                       info.text_base);
                return -ENODEV;
        }
        /* Clear the halt bit (mode = 0) so the firmware starts running. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_MODE,  0x00000000);
        return 0;
}
6517
6518
6519 /* tp->lock is held. */
6520 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6521 {
6522         u32 addr_high, addr_low;
6523         int i;
6524
6525         addr_high = ((tp->dev->dev_addr[0] << 8) |
6526                      tp->dev->dev_addr[1]);
6527         addr_low = ((tp->dev->dev_addr[2] << 24) |
6528                     (tp->dev->dev_addr[3] << 16) |
6529                     (tp->dev->dev_addr[4] <<  8) |
6530                     (tp->dev->dev_addr[5] <<  0));
6531         for (i = 0; i < 4; i++) {
6532                 if (i == 1 && skip_mac_1)
6533                         continue;
6534                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6535                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6536         }
6537
6538         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6539             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6540                 for (i = 0; i < 12; i++) {
6541                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6542                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6543                 }
6544         }
6545
6546         addr_high = (tp->dev->dev_addr[0] +
6547                      tp->dev->dev_addr[1] +
6548                      tp->dev->dev_addr[2] +
6549                      tp->dev->dev_addr[3] +
6550                      tp->dev->dev_addr[4] +
6551                      tp->dev->dev_addr[5]) &
6552                 TX_BACKOFF_SEED_MASK;
6553         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6554 }
6555
6556 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6557 {
6558         struct tg3 *tp = netdev_priv(dev);
6559         struct sockaddr *addr = p;
6560         int err = 0, skip_mac_1 = 0;
6561
6562         if (!is_valid_ether_addr(addr->sa_data))
6563                 return -EINVAL;
6564
6565         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6566
6567         if (!netif_running(dev))
6568                 return 0;
6569
6570         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6571                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6572
6573                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6574                 addr0_low = tr32(MAC_ADDR_0_LOW);
6575                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6576                 addr1_low = tr32(MAC_ADDR_1_LOW);
6577
6578                 /* Skip MAC addr 1 if ASF is using it. */
6579                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6580                     !(addr1_high == 0 && addr1_low == 0))
6581                         skip_mac_1 = 1;
6582         }
6583         spin_lock_bh(&tp->lock);
6584         __tg3_set_mac_addr(tp, skip_mac_1);
6585         spin_unlock_bh(&tp->lock);
6586
6587         return err;
6588 }
6589
6590 /* tp->lock is held. */
6591 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6592                            dma_addr_t mapping, u32 maxlen_flags,
6593                            u32 nic_addr)
6594 {
6595         tg3_write_mem(tp,
6596                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6597                       ((u64) mapping >> 32));
6598         tg3_write_mem(tp,
6599                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6600                       ((u64) mapping & 0xffffffff));
6601         tg3_write_mem(tp,
6602                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6603                        maxlen_flags);
6604
6605         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6606                 tg3_write_mem(tp,
6607                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6608                               nic_addr);
6609 }
6610
6611 static void __tg3_set_rx_mode(struct net_device *);
6612 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6613 {
6614         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6615         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6616         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6617         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6618         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6619                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6620                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6621         }
6622         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6623         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6624         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6625                 u32 val = ec->stats_block_coalesce_usecs;
6626
6627                 if (!netif_carrier_ok(tp->dev))
6628                         val = 0;
6629
6630                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6631         }
6632 }
6633
6634 /* tp->lock is held. */
6635 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6636 {
6637         u32 val, rdmac_mode;
6638         int i, err, limit;
6639
6640         tg3_disable_ints(tp);
6641
6642         tg3_stop_fw(tp);
6643
6644         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6645
6646         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6647                 tg3_abort_hw(tp, 1);
6648         }
6649
6650         if (reset_phy)
6651                 tg3_phy_reset(tp);
6652
6653         err = tg3_chip_reset(tp);
6654         if (err)
6655                 return err;
6656
6657         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6658
6659         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6660             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
6661                 val = tr32(TG3_CPMU_CTRL);
6662                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6663                 tw32(TG3_CPMU_CTRL, val);
6664
6665                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6666                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6667                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6668                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6669
6670                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6671                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6672                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6673                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6674
6675                 val = tr32(TG3_CPMU_HST_ACC);
6676                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6677                 val |= CPMU_HST_ACC_MACCLK_6_25;
6678                 tw32(TG3_CPMU_HST_ACC, val);
6679         }
6680
6681         /* This works around an issue with Athlon chipsets on
6682          * B3 tigon3 silicon.  This bit has no effect on any
6683          * other revision.  But do not set this on PCI Express
6684          * chips and don't even touch the clocks if the CPMU is present.
6685          */
6686         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6687                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6688                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6689                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6690         }
6691
6692         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6693             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6694                 val = tr32(TG3PCI_PCISTATE);
6695                 val |= PCISTATE_RETRY_SAME_DMA;
6696                 tw32(TG3PCI_PCISTATE, val);
6697         }
6698
6699         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6700                 /* Allow reads and writes to the
6701                  * APE register and memory space.
6702                  */
6703                 val = tr32(TG3PCI_PCISTATE);
6704                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6705                        PCISTATE_ALLOW_APE_SHMEM_WR;
6706                 tw32(TG3PCI_PCISTATE, val);
6707         }
6708
6709         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6710                 /* Enable some hw fixes.  */
6711                 val = tr32(TG3PCI_MSI_DATA);
6712                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6713                 tw32(TG3PCI_MSI_DATA, val);
6714         }
6715
6716         /* Descriptor ring init may make accesses to the
6717          * NIC SRAM area to setup the TX descriptors, so we
6718          * can only do this after the hardware has been
6719          * successfully reset.
6720          */
6721         err = tg3_init_rings(tp);
6722         if (err)
6723                 return err;
6724
6725         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6726             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6727                 /* This value is determined during the probe time DMA
6728                  * engine test, tg3_test_dma.
6729                  */
6730                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6731         }
6732
6733         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6734                           GRC_MODE_4X_NIC_SEND_RINGS |
6735                           GRC_MODE_NO_TX_PHDR_CSUM |
6736                           GRC_MODE_NO_RX_PHDR_CSUM);
6737         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6738
6739         /* Pseudo-header checksum is done by hardware logic and not
6740          * the offload processers, so make the chip do the pseudo-
6741          * header checksums on receive.  For transmit it is more
6742          * convenient to do the pseudo-header checksum in software
6743          * as Linux does that on transmit for us in all cases.
6744          */
6745         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6746
6747         tw32(GRC_MODE,
6748              tp->grc_mode |
6749              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6750
6751         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6752         val = tr32(GRC_MISC_CFG);
6753         val &= ~0xff;
6754         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6755         tw32(GRC_MISC_CFG, val);
6756
6757         /* Initialize MBUF/DESC pool. */
6758         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6759                 /* Do nothing.  */
6760         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6761                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6762                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6763                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6764                 else
6765                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6766                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6767                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6768         }
6769         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6770                 int fw_len;
6771
6772                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6773                           TG3_TSO5_FW_RODATA_LEN +
6774                           TG3_TSO5_FW_DATA_LEN +
6775                           TG3_TSO5_FW_SBSS_LEN +
6776                           TG3_TSO5_FW_BSS_LEN);
6777                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6778                 tw32(BUFMGR_MB_POOL_ADDR,
6779                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6780                 tw32(BUFMGR_MB_POOL_SIZE,
6781                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6782         }
6783
6784         if (tp->dev->mtu <= ETH_DATA_LEN) {
6785                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6786                      tp->bufmgr_config.mbuf_read_dma_low_water);
6787                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6788                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6789                 tw32(BUFMGR_MB_HIGH_WATER,
6790                      tp->bufmgr_config.mbuf_high_water);
6791         } else {
6792                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6793                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6794                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6795                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6796                 tw32(BUFMGR_MB_HIGH_WATER,
6797                      tp->bufmgr_config.mbuf_high_water_jumbo);
6798         }
6799         tw32(BUFMGR_DMA_LOW_WATER,
6800              tp->bufmgr_config.dma_low_water);
6801         tw32(BUFMGR_DMA_HIGH_WATER,
6802              tp->bufmgr_config.dma_high_water);
6803
6804         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6805         for (i = 0; i < 2000; i++) {
6806                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6807                         break;
6808                 udelay(10);
6809         }
6810         if (i >= 2000) {
6811                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6812                        tp->dev->name);
6813                 return -ENODEV;
6814         }
6815
6816         /* Setup replenish threshold. */
6817         val = tp->rx_pending / 8;
6818         if (val == 0)
6819                 val = 1;
6820         else if (val > tp->rx_std_max_post)
6821                 val = tp->rx_std_max_post;
6822         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6823                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6824                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6825
6826                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6827                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6828         }
6829
6830         tw32(RCVBDI_STD_THRESH, val);
6831
6832         /* Initialize TG3_BDINFO's at:
6833          *  RCVDBDI_STD_BD:     standard eth size rx ring
6834          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6835          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6836          *
6837          * like so:
6838          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6839          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6840          *                              ring attribute flags
6841          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6842          *
6843          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6844          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6845          *
6846          * The size of each ring is fixed in the firmware, but the location is
6847          * configurable.
6848          */
6849         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6850              ((u64) tp->rx_std_mapping >> 32));
6851         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6852              ((u64) tp->rx_std_mapping & 0xffffffff));
6853         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6854              NIC_SRAM_RX_BUFFER_DESC);
6855
6856         /* Don't even try to program the JUMBO/MINI buffer descriptor
6857          * configs on 5705.
6858          */
6859         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6860                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6861                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6862         } else {
6863                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6864                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6865
6866                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6867                      BDINFO_FLAGS_DISABLED);
6868
6869                 /* Setup replenish threshold. */
6870                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6871
6872                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6873                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6874                              ((u64) tp->rx_jumbo_mapping >> 32));
6875                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6876                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6877                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6878                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6879                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6880                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6881                 } else {
6882                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6883                              BDINFO_FLAGS_DISABLED);
6884                 }
6885
6886         }
6887
6888         /* There is only one send ring on 5705/5750, no need to explicitly
6889          * disable the others.
6890          */
6891         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6892                 /* Clear out send RCB ring in SRAM. */
6893                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6894                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6895                                       BDINFO_FLAGS_DISABLED);
6896         }
6897
6898         tp->tx_prod = 0;
6899         tp->tx_cons = 0;
6900         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6901         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6902
6903         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6904                        tp->tx_desc_mapping,
6905                        (TG3_TX_RING_SIZE <<
6906                         BDINFO_FLAGS_MAXLEN_SHIFT),
6907                        NIC_SRAM_TX_BUFFER_DESC);
6908
6909         /* There is only one receive return ring on 5705/5750, no need
6910          * to explicitly disable the others.
6911          */
6912         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6913                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6914                      i += TG3_BDINFO_SIZE) {
6915                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6916                                       BDINFO_FLAGS_DISABLED);
6917                 }
6918         }
6919
6920         tp->rx_rcb_ptr = 0;
6921         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6922
6923         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6924                        tp->rx_rcb_mapping,
6925                        (TG3_RX_RCB_RING_SIZE(tp) <<
6926                         BDINFO_FLAGS_MAXLEN_SHIFT),
6927                        0);
6928
6929         tp->rx_std_ptr = tp->rx_pending;
6930         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6931                      tp->rx_std_ptr);
6932
6933         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6934                                                 tp->rx_jumbo_pending : 0;
6935         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6936                      tp->rx_jumbo_ptr);
6937
6938         /* Initialize MAC address and backoff seed. */
6939         __tg3_set_mac_addr(tp, 0);
6940
6941         /* MTU + ethernet header + FCS + optional VLAN tag */
6942         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6943
6944         /* The slot time is changed by tg3_setup_phy if we
6945          * run at gigabit with half duplex.
6946          */
6947         tw32(MAC_TX_LENGTHS,
6948              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6949              (6 << TX_LENGTHS_IPG_SHIFT) |
6950              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6951
6952         /* Receive rules. */
6953         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6954         tw32(RCVLPC_CONFIG, 0x0181);
6955
6956         /* Calculate RDMAC_MODE setting early, we need it to determine
6957          * the RCVLPC_STATE_ENABLE mask.
6958          */
6959         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6960                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6961                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6962                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6963                       RDMAC_MODE_LNGREAD_ENAB);
6964
6965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6966                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6967                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6968                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6969
6970         /* If statement applies to 5705 and 5750 PCI devices only */
6971         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6972              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6973             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6974                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6975                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6976                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6977                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6978                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6979                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6980                 }
6981         }
6982
6983         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6984                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6985
6986         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6987                 rdmac_mode |= (1 << 27);
6988
6989         /* Receive/send statistics. */
6990         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6991                 val = tr32(RCVLPC_STATS_ENABLE);
6992                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6993                 tw32(RCVLPC_STATS_ENABLE, val);
6994         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6995                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6996                 val = tr32(RCVLPC_STATS_ENABLE);
6997                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6998                 tw32(RCVLPC_STATS_ENABLE, val);
6999         } else {
7000                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7001         }
7002         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7003         tw32(SNDDATAI_STATSENAB, 0xffffff);
7004         tw32(SNDDATAI_STATSCTRL,
7005              (SNDDATAI_SCTRL_ENABLE |
7006               SNDDATAI_SCTRL_FASTUPD));
7007
7008         /* Setup host coalescing engine. */
7009         tw32(HOSTCC_MODE, 0);
7010         for (i = 0; i < 2000; i++) {
7011                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7012                         break;
7013                 udelay(10);
7014         }
7015
7016         __tg3_set_coalesce(tp, &tp->coal);
7017
7018         /* set status block DMA address */
7019         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7020              ((u64) tp->status_mapping >> 32));
7021         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7022              ((u64) tp->status_mapping & 0xffffffff));
7023
7024         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7025                 /* Status/statistics block address.  See tg3_timer,
7026                  * the tg3_periodic_fetch_stats call there, and
7027                  * tg3_get_stats to see how this works for 5705/5750 chips.
7028                  */
7029                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7030                      ((u64) tp->stats_mapping >> 32));
7031                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7032                      ((u64) tp->stats_mapping & 0xffffffff));
7033                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7034                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7035         }
7036
7037         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7038
7039         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7040         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7041         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7042                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7043
7044         /* Clear statistics/status block in chip, and status block in ram. */
7045         for (i = NIC_SRAM_STATS_BLK;
7046              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7047              i += sizeof(u32)) {
7048                 tg3_write_mem(tp, i, 0);
7049                 udelay(40);
7050         }
7051         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7052
7053         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7054                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7055                 /* reset to prevent losing 1st rx packet intermittently */
7056                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7057                 udelay(10);
7058         }
7059
7060         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7061                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7062         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7063             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7064             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7065                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7066         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7067         udelay(40);
7068
7069         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7070          * If TG3_FLG2_IS_NIC is zero, we should read the
7071          * register to preserve the GPIO settings for LOMs. The GPIOs,
7072          * whether used as inputs or outputs, are set by boot code after
7073          * reset.
7074          */
7075         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7076                 u32 gpio_mask;
7077
7078                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7079                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7080                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7081
7082                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7083                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7084                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7085
7086                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7087                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7088
7089                 tp->grc_local_ctrl &= ~gpio_mask;
7090                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7091
7092                 /* GPIO1 must be driven high for eeprom write protect */
7093                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7094                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7095                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7096         }
7097         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7098         udelay(100);
7099
7100         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7101         tp->last_tag = 0;
7102
7103         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7104                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7105                 udelay(40);
7106         }
7107
7108         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7109                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7110                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7111                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7112                WDMAC_MODE_LNGREAD_ENAB);
7113
7114         /* If statement applies to 5705 and 5750 PCI devices only */
7115         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7116              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7117             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7118                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7119                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7120                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7121                         /* nothing */
7122                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7123                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7124                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7125                         val |= WDMAC_MODE_RX_ACCEL;
7126                 }
7127         }
7128
7129         /* Enable host coalescing bug fix */
7130         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7131             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7132             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7133             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
7134                 val |= (1 << 29);
7135
7136         tw32_f(WDMAC_MODE, val);
7137         udelay(40);
7138
7139         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7140                 u16 pcix_cmd;
7141
7142                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7143                                      &pcix_cmd);
7144                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7145                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7146                         pcix_cmd |= PCI_X_CMD_READ_2K;
7147                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7148                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7149                         pcix_cmd |= PCI_X_CMD_READ_2K;
7150                 }
7151                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7152                                       pcix_cmd);
7153         }
7154
7155         tw32_f(RDMAC_MODE, rdmac_mode);
7156         udelay(40);
7157
7158         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7159         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7160                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7161
7162         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7163                 tw32(SNDDATAC_MODE,
7164                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7165         else
7166                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7167
7168         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7169         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7170         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7171         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7172         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7173                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7174         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7175         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7176
7177         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7178                 err = tg3_load_5701_a0_firmware_fix(tp);
7179                 if (err)
7180                         return err;
7181         }
7182
7183         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7184                 err = tg3_load_tso_firmware(tp);
7185                 if (err)
7186                         return err;
7187         }
7188
7189         tp->tx_mode = TX_MODE_ENABLE;
7190         tw32_f(MAC_TX_MODE, tp->tx_mode);
7191         udelay(100);
7192
7193         tp->rx_mode = RX_MODE_ENABLE;
7194         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7195             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7196                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7197
7198         tw32_f(MAC_RX_MODE, tp->rx_mode);
7199         udelay(10);
7200
7201         if (tp->link_config.phy_is_low_power) {
7202                 tp->link_config.phy_is_low_power = 0;
7203                 tp->link_config.speed = tp->link_config.orig_speed;
7204                 tp->link_config.duplex = tp->link_config.orig_duplex;
7205                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7206         }
7207
7208         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
7209         tw32_f(MAC_MI_MODE, tp->mi_mode);
7210         udelay(80);
7211
7212         tw32(MAC_LED_CTRL, tp->led_ctrl);
7213
7214         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7215         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7216                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7217                 udelay(10);
7218         }
7219         tw32_f(MAC_RX_MODE, tp->rx_mode);
7220         udelay(10);
7221
7222         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7223                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7224                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7225                         /* Set drive transmission level to 1.2V  */
7226                         /* only if the signal pre-emphasis bit is not set  */
7227                         val = tr32(MAC_SERDES_CFG);
7228                         val &= 0xfffff000;
7229                         val |= 0x880;
7230                         tw32(MAC_SERDES_CFG, val);
7231                 }
7232                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7233                         tw32(MAC_SERDES_CFG, 0x616000);
7234         }
7235
7236         /* Prevent chip from dropping frames when flow control
7237          * is enabled.
7238          */
7239         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7240
7241         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7242             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7243                 /* Use hardware link auto-negotiation */
7244                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7245         }
7246
7247         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7248             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7249                 u32 tmp;
7250
7251                 tmp = tr32(SERDES_RX_CTRL);
7252                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7253                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7254                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7255                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7256         }
7257
7258         err = tg3_setup_phy(tp, 0);
7259         if (err)
7260                 return err;
7261
7262         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7263             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7264                 u32 tmp;
7265
7266                 /* Clear CRC stats. */
7267                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7268                         tg3_writephy(tp, MII_TG3_TEST1,
7269                                      tmp | MII_TG3_TEST1_CRC_EN);
7270                         tg3_readphy(tp, 0x14, &tmp);
7271                 }
7272         }
7273
7274         __tg3_set_rx_mode(tp->dev);
7275
7276         /* Initialize receive rules. */
7277         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7278         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7279         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7280         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7281
7282         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7283             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7284                 limit = 8;
7285         else
7286                 limit = 16;
7287         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7288                 limit -= 4;
7289         switch (limit) {
7290         case 16:
7291                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7292         case 15:
7293                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7294         case 14:
7295                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7296         case 13:
7297                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7298         case 12:
7299                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7300         case 11:
7301                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7302         case 10:
7303                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7304         case 9:
7305                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7306         case 8:
7307                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7308         case 7:
7309                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7310         case 6:
7311                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7312         case 5:
7313                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7314         case 4:
7315                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7316         case 3:
7317                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7318         case 2:
7319         case 1:
7320
7321         default:
7322                 break;
7323         };
7324
7325         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7326                 /* Write our heartbeat update interval to APE. */
7327                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7328                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7329
7330         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7331
7332         return 0;
7333 }
7334
7335 /* Called at device open time to get the chip ready for
7336  * packet processing.  Invoked with tp->lock held.
7337  */
7338 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7339 {
7340         int err;
7341
7342         /* Force the chip into D0. */
7343         err = tg3_set_power_state(tp, PCI_D0);
7344         if (err)
7345                 goto out;
7346
7347         tg3_switch_clocks(tp);
7348
7349         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7350
7351         err = tg3_reset_hw(tp, reset_phy);
7352
7353 out:
7354         return err;
7355 }
7356
/* Add the 32-bit value of hardware register REG into the 64-bit
 * counter pointed to by PSTAT (kept as separate ->low / ->high
 * words).  Unsigned wrap of ->low after the addition (its new value
 * being smaller than the addend) signals a carry into ->high.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
7363
/* Fold the chip's 32-bit MAC and receive-list-placement statistics
 * registers into the 64-bit accumulators in tp->hw_stats.  Called
 * once per second from tg3_timer() on 5705-plus chips.  Skips all
 * work while the interface reports no carrier.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        if (!netif_carrier_ok(tp->dev))
                return;

        /* Transmit-side MAC statistics. */
        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        /* Receive-side MAC statistics. */
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        /* Receive list placement counters (empty BDs, discards, errors). */
        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7404
/* Driver watchdog timer, re-armed every tp->timer_offset jiffies.
 * Under tp->lock it (a) works around the racy non-tagged IRQ status
 * protocol by replaying a lost interrupt or re-kicking the status
 * block refresh, and schedules a full reset if the write DMA engine
 * has stopped, (b) once per second fetches statistics and polls /
 * repairs link state, and (c) every two seconds sends the ASF
 * firmware heartbeat.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* An interrupt synchronization is in progress elsewhere;
         * skip this tick entirely but keep the timer alive.
         */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->hw_status->status & SD_STATUS_UPDATED) {
                        /* A status update is pending: force the chip to
                         * raise the interrupt again so it gets serviced.
                         */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* No update pending: nudge host coalescing to
                         * refresh the status block now.
                         */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
                }

                /* Write DMA engine disabled unexpectedly -- hand off to
                 * the reset task (process context) for a full restart.
                 */
                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);

                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        /* Re-run PHY setup if the MAC reports an MI
                         * interrupt or a link state change, depending
                         * on which reporting mode is in use.
                         */
                        phy_event = 0;
                        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Link was up but state changed, or link is down
                         * while PCS sync / signal detect says a partner
                         * is present: renegotiate.
                         */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (! netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                /* Briefly clear the port mode bits to
                                 * bounce the MAC before re-setup.
                                 */
                                if (!tp->serdes_counter) {
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_serdes_parallel_detect(tp);

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                        /* Ensure any previously posted event has been
                         * consumed by the firmware before queuing a new
                         * one.
                         */
                        tg3_wait_for_event_ack(tp);

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        /* 5 seconds timeout */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);

                        tg3_generate_fw_event(tp);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
7523
7524 static int tg3_request_irq(struct tg3 *tp)
7525 {
7526         irq_handler_t fn;
7527         unsigned long flags;
7528         struct net_device *dev = tp->dev;
7529
7530         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7531                 fn = tg3_msi;
7532                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7533                         fn = tg3_msi_1shot;
7534                 flags = IRQF_SAMPLE_RANDOM;
7535         } else {
7536                 fn = tg3_interrupt;
7537                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7538                         fn = tg3_interrupt_tagged;
7539                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7540         }
7541         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7542 }
7543
/* Probe whether the chip can actually deliver an interrupt with the
 * current configuration.  Temporarily installs tg3_test_isr, forces
 * the host coalescing engine to raise an interrupt immediately, then
 * polls up to 5 times (10ms apart) for evidence that the handler ran:
 * a non-zero interrupt mailbox or the PCI interrupt mask bit set in
 * MISC_HOST_CTRL.  The regular handler is reinstalled before return.
 * Returns 0 if an interrupt was observed, -EIO if none was, -ENODEV
 * if the device is not running, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        /* Swap the normal handler out for the test ISR. */
        free_irq(tp->pdev->irq, dev);

        err = request_irq(tp->pdev->irq, tg3_test_isr,
                          IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
        if (err)
                return err;

        tp->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Force host coalescing to fire an interrupt right now. */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               HOSTCC_MODE_NOW);

        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;

                int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
                                        TG3_64BIT_REG_LOW);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

                /* Either sign means the test ISR ran. */
                if ((int_mbox != 0) ||
                    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
                        intr_ok = 1;
                        break;
                }

                msleep(10);
        }

        tg3_disable_ints(tp);

        /* Put the production interrupt handler back. */
        free_irq(tp->pdev->irq, dev);

        err = tg3_request_irq(tp);

        if (err)
                return err;

        if (intr_ok)
                return 0;

        return -EIO;
}
7597
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 *
 * Runs tg3_test_interrupt() while MSI is enabled.  If no interrupt is
 * generated (-EIO), tears down MSI, falls back to INTx, and resets the
 * chip (an MSI cycle that ended in Master Abort may have left it in a
 * bad state).  Any other error from the interrupt test is returned
 * unchanged.
 */
static int tg3_test_msi(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;

        /* Nothing to test unless MSI is actually in use. */
        if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the saved PCI command word (SERR bit included). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
               "switching to INTx mode. Please report this failure to "
               "the PCI maintainer and include system chipset information.\n",
                       tp->dev->name);

        free_irq(tp->pdev->irq, dev);
        pci_disable_msi(tp->pdev);

        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

        err = tg3_request_irq(tp);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        /* Reinit failed: release the IRQ so the caller sees a clean
         * failure (INTx mode was not successfully restored).
         */
        if (err)
                free_irq(tp->pdev->irq, dev);

        return err;
}
7658
/* net_device open() hook.  Brings the chip into D0, allocates the
 * DMA-consistent descriptor memory, enables MSI where supported (and
 * later verifies MSI delivery via tg3_test_msi), programs the
 * hardware, arms the driver watchdog timer, and finally enables
 * interrupts and the transmit queue.  Each failure path unwinds
 * exactly what was acquired up to that point and returns the error.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        netif_carrier_off(tp->dev);

        tg3_full_lock(tp, 0);

        err = tg3_set_power_state(tp, PCI_D0);
        if (err) {
                tg3_full_unlock(tp);
                return err;
        }

        tg3_disable_ints(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                return err;

        if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                        printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
                               "Not using MSI.\n", tp->dev->name);
                } else if (pci_enable_msi(tp->pdev) == 0) {
                        u32 msi_mode;

                        msi_mode = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
                        tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
                }
        }
        err = tg3_request_irq(tp);

        if (err) {
                /* IRQ setup failed: undo MSI and ring allocation. */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        napi_enable(&tp->napi);

        tg3_full_lock(tp, 0);

        err = tg3_init_hw(tp, 1);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
                /* Tagged status allows a 1 Hz watchdog; the racy
                 * non-tagged protocol needs 10 Hz (see tg3_timer).
                 */
                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;

                BUG_ON(tp->timer_offset > HZ);
                tp->timer_counter = tp->timer_multiplier =
                        (HZ / tp->timer_offset);
                /* ASF heartbeat interval is twice the 1-second stats/
                 * link-poll interval, i.e. every 2 seconds.
                 */
                tp->asf_counter = tp->asf_multiplier =
                        ((HZ / tp->timer_offset) * 2);

                init_timer(&tp->timer);
                tp->timer.expires = jiffies + tp->timer_offset;
                tp->timer.data = (unsigned long) tp;
                tp->timer.function = tg3_timer;
        }

        tg3_full_unlock(tp);

        if (err) {
                napi_disable(&tp->napi);
                free_irq(tp->pdev->irq, dev);
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                err = tg3_test_msi(tp);

                if (err) {
                        /* MSI failed and INTx fallback also failed;
                         * tear the whole device back down.
                         */
                        tg3_full_lock(tp, 0);

                        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                                pci_disable_msi(tp->pdev);
                                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                        }
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_free_consistent(tp);

                        tg3_full_unlock(tp);

                        napi_disable(&tp->napi);

                        return err;
                }

                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
                                u32 val = tr32(PCIE_TRANSACTION_CFG);

                                tw32(PCIE_TRANSACTION_CFG,
                                     val | PCIE_TRANS_CFG_1SHOT_MSI);
                        }
                }
        }

        tg3_full_lock(tp, 0);

        add_timer(&tp->timer);
        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        tg3_enable_ints(tp);

        tg3_full_unlock(tp);

        netif_start_queue(dev);

        return 0;
}
7794
7795 #if 0
7796 /*static*/ void tg3_dump_state(struct tg3 *tp)
7797 {
7798         u32 val32, val32_2, val32_3, val32_4, val32_5;
7799         u16 val16;
7800         int i;
7801
7802         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7803         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7804         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7805                val16, val32);
7806
7807         /* MAC block */
7808         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7809                tr32(MAC_MODE), tr32(MAC_STATUS));
7810         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7811                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7812         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7813                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7814         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7815                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7816
7817         /* Send data initiator control block */
7818         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7819                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7820         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7821                tr32(SNDDATAI_STATSCTRL));
7822
7823         /* Send data completion control block */
7824         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7825
7826         /* Send BD ring selector block */
7827         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7828                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7829
7830         /* Send BD initiator control block */
7831         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7832                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7833
7834         /* Send BD completion control block */
7835         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7836
7837         /* Receive list placement control block */
7838         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7839                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7840         printk("       RCVLPC_STATSCTRL[%08x]\n",
7841                tr32(RCVLPC_STATSCTRL));
7842
7843         /* Receive data and receive BD initiator control block */
7844         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7845                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7846
7847         /* Receive data completion control block */
7848         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7849                tr32(RCVDCC_MODE));
7850
7851         /* Receive BD initiator control block */
7852         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7853                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7854
7855         /* Receive BD completion control block */
7856         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7857                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7858
7859         /* Receive list selector control block */
7860         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7861                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7862
7863         /* Mbuf cluster free block */
7864         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7865                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7866
7867         /* Host coalescing control block */
7868         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7869                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7870         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7871                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7872                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7873         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7874                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7875                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7876         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7877                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7878         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7879                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7880
7881         /* Memory arbiter control block */
7882         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7883                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7884
7885         /* Buffer manager control block */
7886         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7887                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7888         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7889                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7890         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7891                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7892                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7893                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7894
7895         /* Read DMA control block */
7896         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7897                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7898
7899         /* Write DMA control block */
7900         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7901                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7902
7903         /* DMA completion block */
7904         printk("DEBUG: DMAC_MODE[%08x]\n",
7905                tr32(DMAC_MODE));
7906
7907         /* GRC block */
7908         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7909                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7910         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7911                tr32(GRC_LOCAL_CTRL));
7912
7913         /* TG3_BDINFOs */
7914         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7915                tr32(RCVDBDI_JUMBO_BD + 0x0),
7916                tr32(RCVDBDI_JUMBO_BD + 0x4),
7917                tr32(RCVDBDI_JUMBO_BD + 0x8),
7918                tr32(RCVDBDI_JUMBO_BD + 0xc));
7919         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7920                tr32(RCVDBDI_STD_BD + 0x0),
7921                tr32(RCVDBDI_STD_BD + 0x4),
7922                tr32(RCVDBDI_STD_BD + 0x8),
7923                tr32(RCVDBDI_STD_BD + 0xc));
7924         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7925                tr32(RCVDBDI_MINI_BD + 0x0),
7926                tr32(RCVDBDI_MINI_BD + 0x4),
7927                tr32(RCVDBDI_MINI_BD + 0x8),
7928                tr32(RCVDBDI_MINI_BD + 0xc));
7929
7930         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7931         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7932         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7933         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7934         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7935                val32, val32_2, val32_3, val32_4);
7936
7937         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7938         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7939         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7940         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7941         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7942                val32, val32_2, val32_3, val32_4);
7943
7944         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7945         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7946         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7947         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7948         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7949         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7950                val32, val32_2, val32_3, val32_4, val32_5);
7951
7952         /* SW status block */
7953         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7954                tp->hw_status->status,
7955                tp->hw_status->status_tag,
7956                tp->hw_status->rx_jumbo_consumer,
7957                tp->hw_status->rx_consumer,
7958                tp->hw_status->rx_mini_consumer,
7959                tp->hw_status->idx[0].rx_producer,
7960                tp->hw_status->idx[0].tx_consumer);
7961
7962         /* SW statistics block */
7963         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7964                ((u32 *)tp->hw_stats)[0],
7965                ((u32 *)tp->hw_stats)[1],
7966                ((u32 *)tp->hw_stats)[2],
7967                ((u32 *)tp->hw_stats)[3]);
7968
7969         /* Mailboxes */
7970         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7971                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7972                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7973                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7974                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7975
7976         /* NIC side send descriptors. */
7977         for (i = 0; i < 6; i++) {
7978                 unsigned long txd;
7979
7980                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7981                         + (i * sizeof(struct tg3_tx_buffer_desc));
7982                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7983                        i,
7984                        readl(txd + 0x0), readl(txd + 0x4),
7985                        readl(txd + 0x8), readl(txd + 0xc));
7986         }
7987
7988         /* NIC side RX descriptors. */
7989         for (i = 0; i < 6; i++) {
7990                 unsigned long rxd;
7991
7992                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7993                         + (i * sizeof(struct tg3_rx_buffer_desc));
7994                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7995                        i,
7996                        readl(rxd + 0x0), readl(rxd + 0x4),
7997                        readl(rxd + 0x8), readl(rxd + 0xc));
7998                 rxd += (4 * sizeof(u32));
7999                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8000                        i,
8001                        readl(rxd + 0x0), readl(rxd + 0x4),
8002                        readl(rxd + 0x8), readl(rxd + 0xc));
8003         }
8004
8005         for (i = 0; i < 6; i++) {
8006                 unsigned long rxd;
8007
8008                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8009                         + (i * sizeof(struct tg3_rx_buffer_desc));
8010                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8011                        i,
8012                        readl(rxd + 0x0), readl(rxd + 0x4),
8013                        readl(rxd + 0x8), readl(rxd + 0xc));
8014                 rxd += (4 * sizeof(u32));
8015                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8016                        i,
8017                        readl(rxd + 0x0), readl(rxd + 0x4),
8018                        readl(rxd + 0x8), readl(rxd + 0xc));
8019         }
8020 }
8021 #endif
8022
8023 static struct net_device_stats *tg3_get_stats(struct net_device *);
8024 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8025
/* Bring the interface fully down: quiesce software activity, halt the
 * chip, release IRQ/MSI and DMA resources, and drop to D3hot power
 * state.  Returns 0 unconditionally.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Stop NAPI polling and wait out any queued reset task before
	 * tearing state down underneath them.
	 */
	napi_disable(&tp->napi);
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	/* Make sure the periodic timer cannot fire again. */
	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	/* Halt the hardware and free the rings while holding the full lock. */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot cumulative statistics into the *_prev blocks before
	 * tg3_free_consistent() below releases the shared memory they are
	 * derived from, so totals persist across close/open cycles.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
8069
8070 static inline unsigned long get_stat64(tg3_stat64_t *val)
8071 {
8072         unsigned long ret;
8073
8074 #if (BITS_PER_LONG == 32)
8075         ret = val->low;
8076 #else
8077         ret = ((u64)val->high << 32) | ((u64)val->low);
8078 #endif
8079         return ret;
8080 }
8081
/* Return the cumulative RX CRC error count for the stats path.
 *
 * On 5700/5701 copper devices the count is taken from the PHY: the
 * MII_TG3_TEST1 CRC-enable bit is (re)written and the counter is read
 * from PHY register 0x14, then accumulated into tp->phy_crc_errors
 * (the accumulate-on-read pattern suggests the PHY counter clears on
 * read -- NOTE(review): confirm against the PHY datasheet).  All other
 * devices report the rx_fcs_errors hardware MAC statistic directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* PHY access must be serialized against other MDIO users. */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
8107
/* Add the live hardware counter for @member on top of the total saved
 * at the last close, yielding a cumulative ethtool statistic.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Refresh tp->estats from the hardware statistics block, layered on
 * top of the totals snapshotted into tp->estats_prev by tg3_close(),
 * so values keep growing across close/open cycles.  If the hardware
 * stats block is not mapped, return the saved totals unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive-side MAC counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side MAC counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Receive list placement state machine counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Send data initiator counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
8199
/* Build the standard net_device_stats view from the hardware MAC
 * statistics block, layered on top of the totals snapshotted into
 * tp->net_stats_prev by tg3_close() so that counters are cumulative
 * across close/open cycles.  When hw_stats is NULL (no DMA stats
 * block mapped), the previously saved totals are returned unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sums of the per-cast-type counters. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	/* tx_errors aggregates every distinct TX failure counter. */
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors need chip-specific handling; see calc_crc_errors(). */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8259
8260 static inline u32 calc_crc(unsigned char *buf, int len)
8261 {
8262         u32 reg;
8263         u32 tmp;
8264         int j, k;
8265
8266         reg = 0xffffffff;
8267
8268         for (j = 0; j < len; j++) {
8269                 reg ^= buf[j];
8270
8271                 for (k = 0; k < 8; k++) {
8272                         tmp = reg & 0x01;
8273
8274                         reg >>= 1;
8275
8276                         if (tmp) {
8277                                 reg ^= 0xedb88320;
8278                         }
8279                 }
8280         }
8281
8282         return ~reg;
8283 }
8284
8285 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8286 {
8287         /* accept or reject all multicast frames */
8288         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8289         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8290         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8291         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8292 }
8293
/* Program the MAC RX mode and multicast hash filter from dev->flags
 * and the device multicast list.  Caller must hold the tp locks (see
 * tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	/* Start from the current mode with the bits we may set below
	 * cleared, then decide each one afresh.
	 */
	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* Hash each address: low 7 bits of the inverted CRC-32
		 * select one of 128 filter bits spread over 4 registers.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register if something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8357
/* netdev hook: reprogram RX filtering under the full lock.  A no-op
 * while the device is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8369
8370 #define TG3_REGDUMP_LEN         (32 * 1024)
8371
/* ethtool hook: size in bytes of the register dump buffer that
 * tg3_get_regs() below fills.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8376
/* ethtool register dump: copy selected register ranges into the
 * caller's TG3_REGDUMP_LEN-byte buffer, placing each register at the
 * buffer offset equal to its register address; gaps stay zeroed.
 * Bails out early while the PHY is in low-power state, leaving an
 * all-zero dump (NOTE(review): presumably register access is unsafe
 * in that state -- confirm).
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the dump at the current cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Position the cursor at (base) and read (len) bytes of registers. */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Position the cursor at (reg) and read that single register. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only meaningful on parts with NVRAM. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8449
/* ethtool hook: size of the EEPROM/NVRAM exposed through
 * tg3_get_eeprom()/tg3_set_eeprom().
 */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8456
8457 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8458 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8459 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8460
/* ethtool EEPROM read.  NVRAM can only be read in aligned 4-byte
 * words, so the request is split into an unaligned head, a run of
 * whole words, and an unaligned tail.  eeprom->len tracks the bytes
 * actually copied, so a partial result is reported on error.
 * Returns 0 or a negative errno (-EAGAIN while in low power).
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		/* Read the whole word, copy out just the wanted bytes. */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			/* Report how much was copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8520
8521 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8522
/* ethtool EEPROM write.  NVRAM writes must be whole aligned 4-byte
 * words, so unaligned head/tail bytes are handled read-modify-write:
 * the bordering words are read first, then a bounce buffer is built
 * combining preserved bytes with the caller's data before the single
 * block write.  Returns 0 or a negative errno (-EAGAIN while in low
 * power, -EINVAL on bad magic).
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Bounce buffer: preserved border words plus user data. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8580
/* ethtool get-settings hook: report supported modes, port type, and
 * current link configuration based on the device's capability flags.
 * Speed/duplex are only meaningful while the interface is running.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes unless the part is 10/100-only. */
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper parts add 10/100 modes; SerDes parts are fibre-only. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
8615
/* ethtool set-settings hook: validate the requested link parameters
 * against hardware limits (fibre is 1000-only; copper cannot force
 * 1000; 10/100-only parts reject 1000), then commit them to
 * link_config and renegotiate if the interface is up.  Returns 0 or
 * -EINVAL.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
		/* These are the only valid advertisement bits allowed.  */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
		/* Fiber can only do SPEED_1000.  */
		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
			 (cmd->speed != SPEED_1000))
			return -EINVAL;
	/* Copper cannot force SPEED_1000.  */
	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
		   (cmd->speed == SPEED_1000))
		return -EINVAL;
	else if ((cmd->speed == SPEED_1000) &&
		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		return -EINVAL;

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg: record advertised modes, invalidate forced ones. */
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		/* Forced mode: record speed/duplex, clear advertising. */
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember the requested config so it can be restored later
	 * (e.g. after power transitions).
	 */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
8665
8666 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8667 {
8668         struct tg3 *tp = netdev_priv(dev);
8669
8670         strcpy(info->driver, DRV_MODULE_NAME);
8671         strcpy(info->version, DRV_MODULE_VERSION);
8672         strcpy(info->fw_version, tp->fw_ver);
8673         strcpy(info->bus_info, pci_name(tp->pdev));
8674 }
8675
8676 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8677 {
8678         struct tg3 *tp = netdev_priv(dev);
8679
8680         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8681                 wol->supported = WAKE_MAGIC;
8682         else
8683                 wol->supported = 0;
8684         wol->wolopts = 0;
8685         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8686                 wol->wolopts = WAKE_MAGIC;
8687         memset(&wol->sopass, 0, sizeof(wol->sopass));
8688 }
8689
8690 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8691 {
8692         struct tg3 *tp = netdev_priv(dev);
8693
8694         if (wol->wolopts & ~WAKE_MAGIC)
8695                 return -EINVAL;
8696         if ((wol->wolopts & WAKE_MAGIC) &&
8697             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8698                 return -EINVAL;
8699
8700         spin_lock_bh(&tp->lock);
8701         if (wol->wolopts & WAKE_MAGIC)
8702                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8703         else
8704                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8705         spin_unlock_bh(&tp->lock);
8706
8707         return 0;
8708 }
8709
8710 static u32 tg3_get_msglevel(struct net_device *dev)
8711 {
8712         struct tg3 *tp = netdev_priv(dev);
8713         return tp->msg_enable;
8714 }
8715
8716 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8717 {
8718         struct tg3 *tp = netdev_priv(dev);
8719         tp->msg_enable = value;
8720 }
8721
/* ethtool set_tso handler: enable/disable TCP segmentation offload.
 *
 * Returns 0 on success or -EINVAL if TSO is requested on hardware that
 * cannot perform it.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Chips without TSO support can only accept "off". */
        if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
                if (value)
                        return -EINVAL;
                return 0;
        }
        /* On HW_TSO_2 parts (except the 5906) the IPv6 TSO feature bit
         * tracks the requested setting; the 5761 additionally supports
         * TSO with ECN.
         */
        if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
                if (value) {
                        dev->features |= NETIF_F_TSO6;
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                                dev->features |= NETIF_F_TSO_ECN;
                } else
                        dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
        }
        /* The generic helper toggles the base NETIF_F_TSO bit itself. */
        return ethtool_op_set_tso(dev, value);
}
8742
/* ethtool nway_reset handler: restart link autonegotiation.
 *
 * Returns 0 if autonegotiation was restarted, -EAGAIN if the interface
 * is down, or -EINVAL for SERDES devices or when the PHY does not have
 * autoneg enabled (and parallel detect is not active).
 */
static int tg3_nway_reset(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 bmcr;
        int r;

        if (!netif_running(dev))
                return -EAGAIN;

        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                return -EINVAL;

        spin_lock_bh(&tp->lock);
        r = -EINVAL;
        /* NOTE(review): BMCR is read twice back to back and only the
         * second read is checked.  Presumably the first read flushes a
         * stale/latched value -- confirm before collapsing to one read.
         */
        tg3_readphy(tp, MII_BMCR, &bmcr);
        if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
            ((bmcr & BMCR_ANENABLE) ||
             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
                /* Restart autoneg; force ANENABLE back on in case
                 * parallel detection had taken over the link.
                 */
                tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
                                           BMCR_ANENABLE);
                r = 0;
        }
        spin_unlock_bh(&tp->lock);

        return r;
}
8769
8770 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8771 {
8772         struct tg3 *tp = netdev_priv(dev);
8773
8774         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8775         ering->rx_mini_max_pending = 0;
8776         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8777                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8778         else
8779                 ering->rx_jumbo_max_pending = 0;
8780
8781         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8782
8783         ering->rx_pending = tp->rx_pending;
8784         ering->rx_mini_pending = 0;
8785         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8786                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8787         else
8788                 ering->rx_jumbo_pending = 0;
8789
8790         ering->tx_pending = tp->tx_pending;
8791 }
8792
/* ethtool set_ringparam handler: resize the RX/TX rings.
 *
 * Rejects sizes outside the hardware ring limits or TX rings too small
 * to hold a maximally fragmented skb.  If the device is running it is
 * halted and re-initialized so the new sizes take effect.  Returns 0 or
 * a negative errno from the hardware restart.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct tg3 *tp = netdev_priv(dev);
        int irq_sync = 0, err = 0;

        /* The TX ring must hold more than one worst-case packet; chips
         * with the TSO bug need three times that headroom.
         */
        if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
            (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
            (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
            (ering->tx_pending <= MAX_SKB_FRAGS) ||
            ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
             (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
                return -EINVAL;

        if (netif_running(dev)) {
                tg3_netif_stop(tp);
                irq_sync = 1;
        }

        tg3_full_lock(tp, irq_sync);

        tp->rx_pending = ering->rx_pending;

        /* Some chips cannot post more than 64 standard RX descriptors. */
        if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
            tp->rx_pending > 63)
                tp->rx_pending = 63;
        tp->rx_jumbo_pending = ering->rx_jumbo_pending;
        tp->tx_pending = ering->tx_pending;

        if (netif_running(dev)) {
                /* Re-init the hardware with the new ring sizes. */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);
                if (!err)
                        tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);

        return err;
}
8832
8833 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8834 {
8835         struct tg3 *tp = netdev_priv(dev);
8836
8837         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8838
8839         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8840                 epause->rx_pause = 1;
8841         else
8842                 epause->rx_pause = 0;
8843
8844         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8845                 epause->tx_pause = 1;
8846         else
8847                 epause->tx_pause = 0;
8848 }
8849
/* ethtool set_pauseparam handler: configure flow control.
 *
 * Updates the pause-autoneg flag and the requested RX/TX pause bits,
 * then restarts the hardware if the interface is up so the new settings
 * take effect.  Returns 0 or a negative errno from the restart.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
        struct tg3 *tp = netdev_priv(dev);
        int irq_sync = 0, err = 0;

        if (netif_running(dev)) {
                tg3_netif_stop(tp);
                irq_sync = 1;
        }

        tg3_full_lock(tp, irq_sync);

        if (epause->autoneg)
                tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
        else
                tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
        if (epause->rx_pause)
                tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
        else
                tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
        if (epause->tx_pause)
                tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
        else
                tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

        if (netif_running(dev)) {
                /* Re-init so the MAC picks up the new flow control. */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);
                if (!err)
                        tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);

        return err;
}
8886
8887 static u32 tg3_get_rx_csum(struct net_device *dev)
8888 {
8889         struct tg3 *tp = netdev_priv(dev);
8890         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8891 }
8892
8893 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8894 {
8895         struct tg3 *tp = netdev_priv(dev);
8896
8897         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8898                 if (data != 0)
8899                         return -EINVAL;
8900                 return 0;
8901         }
8902
8903         spin_lock_bh(&tp->lock);
8904         if (data)
8905                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8906         else
8907                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8908         spin_unlock_bh(&tp->lock);
8909
8910         return 0;
8911 }
8912
8913 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8914 {
8915         struct tg3 *tp = netdev_priv(dev);
8916
8917         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8918                 if (data != 0)
8919                         return -EINVAL;
8920                 return 0;
8921         }
8922
8923         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8924             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8925             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8926             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8927                 ethtool_op_set_tx_ipv6_csum(dev, data);
8928         else
8929                 ethtool_op_set_tx_csum(dev, data);
8930
8931         return 0;
8932 }
8933
8934 static int tg3_get_sset_count (struct net_device *dev, int sset)
8935 {
8936         switch (sset) {
8937         case ETH_SS_TEST:
8938                 return TG3_NUM_TEST;
8939         case ETH_SS_STATS:
8940                 return TG3_NUM_STATS;
8941         default:
8942                 return -EOPNOTSUPP;
8943         }
8944 }
8945
8946 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8947 {
8948         switch (stringset) {
8949         case ETH_SS_STATS:
8950                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8951                 break;
8952         case ETH_SS_TEST:
8953                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8954                 break;
8955         default:
8956                 WARN_ON(1);     /* we need a WARN() */
8957                 break;
8958         }
8959 }
8960
/* ethtool phys_id handler: blink the port LEDs to identify the NIC.
 *
 * @data: number of seconds to blink; 0 means "indefinitely"
 *        (approximated with a very large count).
 *
 * Toggles all LEDs fully on and fully off at 1 Hz until done or
 * interrupted by a signal, then restores the saved LED configuration.
 * Returns -EAGAIN if the interface is down, otherwise 0.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
        struct tg3 *tp = netdev_priv(dev);
        int i;

        if (!netif_running(tp->dev))
                return -EAGAIN;

        if (data == 0)
                data = UINT_MAX / 2;

        /* Two half-second phases per requested second. */
        for (i = 0; i < (data * 2); i++) {
                if ((i % 2) == 0)
                        /* Phase 1: override and force every LED on. */
                        tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                                           LED_CTRL_1000MBPS_ON |
                                           LED_CTRL_100MBPS_ON |
                                           LED_CTRL_10MBPS_ON |
                                           LED_CTRL_TRAFFIC_OVERRIDE |
                                           LED_CTRL_TRAFFIC_BLINK |
                                           LED_CTRL_TRAFFIC_LED);

                else
                        /* Phase 2: override with everything off. */
                        tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                                           LED_CTRL_TRAFFIC_OVERRIDE);

                /* Stop early if the sleep is interrupted by a signal. */
                if (msleep_interruptible(500))
                        break;
        }
        /* Restore normal LED behavior. */
        tw32(MAC_LED_CTRL, tp->led_ctrl);
        return 0;
}
8992
/* ethtool get_ethtool_stats handler: copy the driver statistics block
 * into the user-visible u64 array.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
                                   struct ethtool_stats *estats, u64 *tmp_stats)
{
        struct tg3 *tp = netdev_priv(dev);
        /* NOTE(review): tg3_get_estats() presumably refreshes tp->estats
         * from hardware counters and returns it -- confirm in its
         * definition before relying on freshness here.
         */
        memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8999
9000 #define NVRAM_TEST_SIZE 0x100
9001 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9002 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9003 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9004 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9005 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9006
/* Self-test: verify the integrity of the NVRAM contents.
 *
 * Determines the NVRAM layout from the magic word, reads the relevant
 * region into a temporary buffer, and validates it:
 *   - selfboot "firmware" format: 8-bit additive checksum must be zero
 *     (rev 2 excludes the 4-byte MBA word from the sum);
 *   - selfboot "hardware" format: per-byte parity bits must make each
 *     data byte's total bit count odd;
 *   - legacy EEPROM format: CRC over the bootstrap block (checksum at
 *     offset 0x10) and the manufacturing block (checksum at 0xfc).
 *
 * Returns 0 on success, -EIO on read failure or checksum mismatch,
 * -ENOMEM if the buffer allocation fails.  Unrecognized selfboot
 * revisions/formats are skipped (returns 0).
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 csum, magic;
        __le32 *buf;
        int i, j, k, err = 0, size;

        if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
                return -EIO;

        /* Pick how many bytes to read based on the detected format. */
        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
                    TG3_EEPROM_SB_FORMAT_1) {
                        switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
                        case TG3_EEPROM_SB_REVISION_0:
                                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_2:
                                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_3:
                                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                                break;
                        default:
                                /* Unknown revision: nothing to check. */
                                return 0;
                        }
                } else
                        return 0;
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        /* Read the region word by word; bail on the first failure. */
        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
                        break;
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        magic = swab32(le32_to_cpu(buf[0]));
        if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
                    TG3_EEPROM_SB_REVISION_2) {
                        /* For rev 2, the csum doesn't include the MBA. */
                        for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                                csum8 += buf8[i];
                        for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                                csum8 += buf8[i];
                } else {
                        for (i = 0; i < size; i++)
                                csum8 += buf8[i];
                }

                /* A valid image sums to zero (mod 256). */
                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  Note
                 * that the loop index is advanced *inside* the body to
                 * skip over the packed parity bytes at offsets 0, 8 and
                 * 16/17.
                 */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        else if (i == 16) {
                                int l;
                                u8 msk;

                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                /* Each data byte plus its parity bit must have odd
                 * total population count; either all-even or all-odd
                 * mismatches fail the test.
                 */
                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if(csum != le32_to_cpu(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != le32_to_cpu(buf[0xfc/4]))
                 goto out;

        err = 0;

out:
        kfree(buf);
        return err;
}
9140
9141 #define TG3_SERDES_TIMEOUT_SEC  2
9142 #define TG3_COPPER_TIMEOUT_SEC  6
9143
9144 static int tg3_test_link(struct tg3 *tp)
9145 {
9146         int i, max;
9147
9148         if (!netif_running(tp->dev))
9149                 return -ENODEV;
9150
9151         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9152                 max = TG3_SERDES_TIMEOUT_SEC;
9153         else
9154                 max = TG3_COPPER_TIMEOUT_SEC;
9155
9156         for (i = 0; i < max; i++) {
9157                 if (netif_carrier_ok(tp->dev))
9158                         return 0;
9159
9160                 if (msleep_interruptible(1000))
9161                         break;
9162         }
9163
9164         return -EIO;
9165 }
9166
/* Only test the commonly used registers */
/* Self-test: verify read-only and read/write bits of key registers.
 *
 * For every applicable entry in reg_tbl[], writes all-zeros and then
 * all-ones patterns and checks that read-only bits keep their saved
 * value while read/write bits follow the written pattern.  The original
 * register contents are restored in every case.  Entries are filtered
 * by chip family via the TG3_FL_* flags.  Returns 0 on success, -EIO on
 * the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705, is_5750;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        static struct {
                u16 offset;
                u16 flags;
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
                u32 read_mask;
                u32 write_mask;
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                /* Sentinel terminating the table. */
                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        is_5705 = is_5750 = 0;
        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                is_5705 = 1;
                if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                /* Skip entries that do not apply to this chip family. */
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                tw32(offset, save_val);
        }

        return 0;

out:
        /* Restore the register before reporting the failing offset. */
        if (netif_msg_hw(tp))
                printk(KERN_ERR PFX "Register test failed at offset %x\n",
                       offset);
        tw32(offset, save_val);
        return -EIO;
}
9387
9388 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9389 {
9390         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9391         int i;
9392         u32 j;
9393
9394         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9395                 for (j = 0; j < len; j += 4) {
9396                         u32 val;
9397
9398                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9399                         tg3_read_mem(tp, offset + j, &val);
9400                         if (val != test_pattern[i])
9401                                 return -EIO;
9402                 }
9403         }
9404         return 0;
9405 }
9406
/* Self-test: pattern-test the chip's internal memory regions.
 *
 * Selects the offset/length map matching the chip family (the regions
 * and their sizes differ between the 570x, 5705, 5755 and 5906 lines),
 * then runs tg3_do_mem_test() on each region.  Each table ends with an
 * offset of 0xffffffff as sentinel.  Returns 0 on success or the first
 * error from tg3_do_mem_test().
 */
static int tg3_test_memory(struct tg3 *tp)
{
        static struct mem_entry {
                u32 offset;
                u32 len;
        } mem_tbl_570x[] = {
                { 0x00000000, 0x00b50},
                { 0x00002000, 0x1c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5705[] = {
                { 0x00000100, 0x0000c},
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x01000},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0e000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5755[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x00800},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5906[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00400},
                { 0x00006000, 0x00400},
                { 0x00008000, 0x01000},
                { 0x00010000, 0x01000},
                { 0xffffffff, 0x00000}
        };
        struct mem_entry *mem_tbl;
        int err = 0;
        int i;

        /* Map chip family to the appropriate memory table. */
        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        mem_tbl = mem_tbl_5755;
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                        mem_tbl = mem_tbl_5906;
                else
                        mem_tbl = mem_tbl_5705;
        } else
                mem_tbl = mem_tbl_570x;

        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
                if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
                    mem_tbl[i].len)) != 0)
                        break;
        }

        return err;
}
9464
9465 #define TG3_MAC_LOOPBACK        0
9466 #define TG3_PHY_LOOPBACK        1
9467
/* Run a single-packet loopback self-test in either internal MAC
 * loopback or PHY loopback mode.  Builds one 1514-byte frame (our MAC
 * as destination plus an incrementing byte pattern), posts it on the
 * TX ring, polls the status block until the packet is both consumed
 * and re-received, then validates the RX descriptor and payload.
 *
 * Returns 0 on success, -ENOMEM if the skb cannot be allocated,
 * -EINVAL for an unknown loopback_mode, -EIO on timeout or data
 * mismatch.  Called from tg3_test_loopback() with the device quiesced.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
        u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 desc_idx;
        struct sk_buff *skb, *rx_skb;
        u8 *tx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;

        if (loopback_mode == TG3_MAC_LOOPBACK) {
                /* HW errata - mac loopback fails in some cases on 5780.
                 * Normal traffic and PHY loopback are not affected by
                 * errata.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
                        return 0;

                mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
                           MAC_MODE_PORT_INT_LPBACK;
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        mac_mode |= MAC_MODE_LINK_POLARITY;
                if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                tw32(MAC_MODE, mac_mode);
        } else if (loopback_mode == TG3_PHY_LOOPBACK) {
                u32 val;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        u32 phytest;

                        /* 5906 EPHY: temporarily enable shadow-register
                         * access and clear bit 0x20 of register 0x1b
                         * before looping back at 100Mb.
                         */
                        if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
                                u32 phy;

                                tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                             phytest | MII_TG3_EPHY_SHADOW_EN);
                                if (!tg3_readphy(tp, 0x1b, &phy))
                                        tg3_writephy(tp, 0x1b, phy & ~0x20);
                                tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
                        }
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
                } else
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

                tg3_phy_toggle_automdix(tp, 0);

                tg3_writephy(tp, MII_BMCR, val);
                udelay(40);

                mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                } else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;

                /* reset to prevent losing 1st rx packet intermittently */
                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                        tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                        udelay(10);
                        tw32_f(MAC_RX_MODE, tp->rx_mode);
                }
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                        /* 5401 and 5411 PHYs want opposite link-polarity
                         * settings on 5700-class chips.
                         */
                        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
                                mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                }
                tw32(MAC_MODE, mac_mode);
        }
        else
                return -EINVAL;

        err = -EIO;

        /* Build the test frame: destination = our own MAC, zeroed
         * source/type bytes, then an incrementing payload pattern.
         */
        tx_len = 1514;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;

        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);

        tw32(MAC_RX_MTU_SIZE, tx_len + 4);

        for (i = 14; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

        /* Force a coalescing pass so the status block indices are fresh
         * before we sample rx_producer.
         */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
             HOSTCC_MODE_NOW);

        udelay(10);

        rx_start_idx = tp->hw_status->idx[0].rx_producer;

        num_pkts = 0;

        tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

        tp->tx_prod++;
        num_pkts++;

        /* Kick the TX ring; reading the mailbox back flushes the write. */
        tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
                     tp->tx_prod);
        tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

        udelay(10);

        /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
        for (i = 0; i < 25; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       HOSTCC_MODE_NOW);

                udelay(10);

                tx_idx = tp->hw_status->idx[0].tx_consumer;
                rx_idx = tp->hw_status->idx[0].rx_producer;
                if ((tx_idx == tp->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        if (tx_idx != tp->tx_prod)
                goto out;

        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        /* Validate the RX descriptor: must come from the standard ring,
         * carry no error bits (a lone odd-nibble MII report is allowed),
         * and match the transmitted length minus the 4-byte FCS.
         */
        desc = &tp->rx_rcb[rx_start_idx];
        desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
        opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
        if (opaque_key != RXD_OPAQUE_RING_STD)
                goto out;

        if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
            (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                goto out;

        rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
        if (rx_len != tx_len)
                goto out;

        rx_skb = tp->rx_std_buffers[desc_idx].skb;

        map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
        pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

        /* Byte-compare the received payload against the TX pattern. */
        for (i = 14; i < tx_len; i++) {
                if (*(rx_skb->data + i) != (u8) (i & 0xff))
                        goto out;
        }
        err = 0;

        /* tg3_free_rings will unmap and free the rx_skb */
out:
        return err;
}
9635
9636 #define TG3_MAC_LOOPBACK_FAILED         1
9637 #define TG3_PHY_LOOPBACK_FAILED         2
9638 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9639                                          TG3_PHY_LOOPBACK_FAILED)
9640
/* Drive the loopback self-tests: reset the chip, run internal MAC
 * loopback, then (for copper devices only) external PHY loopback.
 * On 5784/5761 the CPMU's link-based power management is disabled
 * around the MAC loopback, guarded by the CPMU hardware mutex.
 *
 * Returns a bitmask of TG3_MAC_LOOPBACK_FAILED and
 * TG3_PHY_LOOPBACK_FAILED; 0 means all applicable tests passed.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
        int err = 0;
        u32 cpmuctrl = 0;

        if (!netif_running(tp->dev))
                return TG3_LOOPBACK_FAILED;

        err = tg3_reset_hw(tp, 1);
        if (err)
                return TG3_LOOPBACK_FAILED;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                int i;
                u32 status;

                tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

                /* Wait for up to 40 microseconds to acquire lock. */
                for (i = 0; i < 4; i++) {
                        status = tr32(TG3_CPMU_MUTEX_GNT);
                        if (status == CPMU_MUTEX_GNT_DRIVER)
                                break;
                        udelay(10);
                }

                /* Could not get the CPMU mutex; abort rather than race
                 * the management firmware over the CPMU registers.
                 */
                if (status != CPMU_MUTEX_GNT_DRIVER)
                        return TG3_LOOPBACK_FAILED;

                /* Turn off link-based power management. */
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                tw32(TG3_CPMU_CTRL,
                     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
                                  CPMU_CTRL_LINK_AWARE_MODE));
        }

        if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
                err |= TG3_MAC_LOOPBACK_FAILED;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                /* Restore the saved CPMU control word. */
                tw32(TG3_CPMU_CTRL, cpmuctrl);

                /* Release the mutex */
                tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
        }

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                /* PHY loopback only applies to copper PHYs. */
                if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
                        err |= TG3_PHY_LOOPBACK_FAILED;
        }

        return err;
}
9696
/* ethtool .self_test entry point.  Always runs the NVRAM and link
 * checks; when ETH_TEST_FL_OFFLINE is requested it additionally halts
 * the chip to run the register, memory, loopback, and interrupt tests,
 * then restarts the hardware.  Per-test results land in data[]
 * (nonzero = failed; data[4] holds the loopback failure bitmask) and
 * etest->flags gets ETH_TEST_FL_FAILED on any failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Wake the chip if it was put into a low-power state. */
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D0);

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[0] = 1;
        }
        if (tg3_test_link(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[1] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, irq_sync = 0;

                /* Quiesce the data path before taking the full lock. */
                if (netif_running(dev)) {
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                /* Halt the hardware and on-chip CPUs for offline tests;
                 * the NVRAM lock is held across the CPU halts and
                 * released only if it was successfully acquired.
                 */
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[2] = 1;
                }
                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[3] = 1;
                }
                if ((data[4] = tg3_test_loopback(tp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* The interrupt test needs the lock dropped. */
                tg3_full_unlock(tp);

                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[5] = 1;
                }

                tg3_full_lock(tp, 0);

                /* Bring the device back up if it was running. */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                        if (!tg3_restart_hw(tp, 1))
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);
        }
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D3hot);

}
9769
9770 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9771 {
9772         struct mii_ioctl_data *data = if_mii(ifr);
9773         struct tg3 *tp = netdev_priv(dev);
9774         int err;
9775
9776         switch(cmd) {
9777         case SIOCGMIIPHY:
9778                 data->phy_id = PHY_ADDR;
9779
9780                 /* fallthru */
9781         case SIOCGMIIREG: {
9782                 u32 mii_regval;
9783
9784                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9785                         break;                  /* We have no PHY */
9786
9787                 if (tp->link_config.phy_is_low_power)
9788                         return -EAGAIN;
9789
9790                 spin_lock_bh(&tp->lock);
9791                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9792                 spin_unlock_bh(&tp->lock);
9793
9794                 data->val_out = mii_regval;
9795
9796                 return err;
9797         }
9798
9799         case SIOCSMIIREG:
9800                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9801                         break;                  /* We have no PHY */
9802
9803                 if (!capable(CAP_NET_ADMIN))
9804                         return -EPERM;
9805
9806                 if (tp->link_config.phy_is_low_power)
9807                         return -EAGAIN;
9808
9809                 spin_lock_bh(&tp->lock);
9810                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9811                 spin_unlock_bh(&tp->lock);
9812
9813                 return err;
9814
9815         default:
9816                 /* do nothing */
9817                 break;
9818         }
9819         return -EOPNOTSUPP;
9820 }
9821
9822 #if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: install the new VLAN group and refresh the
 * RX-mode VLAN-tag-stripping setting.  The interface is stopped around
 * the update so the RX path never observes a half-switched vlgrp.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev))
                tg3_netif_stop(tp);

        tg3_full_lock(tp, 0);

        tp->vlgrp = grp;

        /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
        __tg3_set_rx_mode(dev);

        if (netif_running(dev))
                tg3_netif_start(tp);

        tg3_full_unlock(tp);
}
9842 #endif
9843
9844 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9845 {
9846         struct tg3 *tp = netdev_priv(dev);
9847
9848         memcpy(ec, &tp->coal, sizeof(*ec));
9849         return 0;
9850 }
9851
9852 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9853 {
9854         struct tg3 *tp = netdev_priv(dev);
9855         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9856         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9857
9858         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9859                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9860                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9861                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9862                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9863         }
9864
9865         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9866             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9867             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9868             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9869             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9870             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9871             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9872             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9873             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9874             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9875                 return -EINVAL;
9876
9877         /* No rx interrupts will be generated if both are zero */
9878         if ((ec->rx_coalesce_usecs == 0) &&
9879             (ec->rx_max_coalesced_frames == 0))
9880                 return -EINVAL;
9881
9882         /* No tx interrupts will be generated if both are zero */
9883         if ((ec->tx_coalesce_usecs == 0) &&
9884             (ec->tx_max_coalesced_frames == 0))
9885                 return -EINVAL;
9886
9887         /* Only copy relevant parameters, ignore all others. */
9888         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9889         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9890         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9891         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9892         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9893         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9894         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9895         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9896         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9897
9898         if (netif_running(dev)) {
9899                 tg3_full_lock(tp, 0);
9900                 __tg3_set_coalesce(tp, &tp->coal);
9901                 tg3_full_unlock(tp);
9902         }
9903         return 0;
9904 }
9905
/* ethtool operations table; installed as dev->ethtool_ops at probe
 * time so userspace `ethtool` can query and configure the device.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .get_rx_csum            = tg3_get_rx_csum,
        .set_rx_csum            = tg3_set_rx_csum,
        .set_tx_csum            = tg3_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = tg3_set_tso,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .phys_id                = tg3_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
};
9938
9939 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9940 {
9941         u32 cursize, val, magic;
9942
9943         tp->nvram_size = EEPROM_CHIP_SIZE;
9944
9945         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9946                 return;
9947
9948         if ((magic != TG3_EEPROM_MAGIC) &&
9949             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9950             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9951                 return;
9952
9953         /*
9954          * Size the chip by reading offsets at increasing powers of two.
9955          * When we encounter our validation signature, we know the addressing
9956          * has wrapped around, and thus have our chip size.
9957          */
9958         cursize = 0x10;
9959
9960         while (cursize < tp->nvram_size) {
9961                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9962                         return;
9963
9964                 if (val == magic)
9965                         break;
9966
9967                 cursize <<= 1;
9968         }
9969
9970         tp->nvram_size = cursize;
9971 }
9972
9973 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9974 {
9975         u32 val;
9976
9977         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9978                 return;
9979
9980         /* Selfboot format */
9981         if (val != TG3_EEPROM_MAGIC) {
9982                 tg3_get_eeprom_size(tp);
9983                 return;
9984         }
9985
9986         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9987                 if (val != 0) {
9988                         tp->nvram_size = (val >> 16) * 1024;
9989                         return;
9990                 }
9991         }
9992         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
9993 }
9994
/* Decode NVRAM_CFG1 on pre-5752 devices: record whether a flash part
 * (vs. a serial EEPROM) is attached and, for 5750/5780-class chips,
 * look up the vendor-specific JEDEC id, page size, and buffered-flash
 * flag from the strapping bits.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tp->tg3_flags2 |= TG3_FLG2_FLASH;
        }
        else {
                /* EEPROM interface: ensure compatibility bypass is off. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                /* Vendor-strap → flash geometry table. */
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                        case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_ATMEL_EEPROM:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ST:
                                tp->nvram_jedecnum = JEDEC_ST;
                                tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_SAIFUN:
                                tp->nvram_jedecnum = JEDEC_SAIFUN;
                                tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_SST_SMALL:
                        case FLASH_VENDOR_SST_LARGE:
                                tp->nvram_jedecnum = JEDEC_SST;
                                tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                                break;
                }
        }
        else {
                /* Other chips default to a buffered Atmel AT45DB0X1B. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
        }
}
10047
/* Decode NVRAM_CFG1 on 5752: set the JEDEC vendor, buffered/flash
 * flags, and the page size (taken from CFG1 for flash parts, or the
 * maximum EEPROM size for EEPROM parts).
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
        }

        if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
                /* Page size is encoded directly in CFG1 for flash parts. */
                switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
                        case FLASH_5752PAGE_SIZE_256:
                                tp->nvram_pagesize = 256;
                                break;
                        case FLASH_5752PAGE_SIZE_512:
                                tp->nvram_pagesize = 512;
                                break;
                        case FLASH_5752PAGE_SIZE_1K:
                                tp->nvram_pagesize = 1024;
                                break;
                        case FLASH_5752PAGE_SIZE_2K:
                                tp->nvram_pagesize = 2048;
                                break;
                        case FLASH_5752PAGE_SIZE_4K:
                                tp->nvram_pagesize = 4096;
                                break;
                        case FLASH_5752PAGE_SIZE_264:
                                tp->nvram_pagesize = 264;
                                break;
                }
        }
        else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}
10108
/* Decode NVRAM_CFG1 on 5755: set the JEDEC vendor, buffered/flash
 * flags, page size, and total size.  When TPM protection (bit 27) is
 * active, only part of the device is accessible, so smaller effective
 * sizes are reported.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                case FLASH_5755VENDOR_ATMEL_FLASH_5:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
                            nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
                                tp->nvram_size = (protect ? 0x3e200 :
                                                  TG3_NVRAM_SIZE_512KB);
                        else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
                                tp->nvram_size = (protect ? 0x1f200 :
                                                  TG3_NVRAM_SIZE_256KB);
                        else
                                tp->nvram_size = (protect ? 0x1f200 :
                                                  TG3_NVRAM_SIZE_128KB);
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
                                tp->nvram_size = (protect ?
                                                  TG3_NVRAM_SIZE_64KB :
                                                  TG3_NVRAM_SIZE_128KB);
                        else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
                                tp->nvram_size = (protect ?
                                                  TG3_NVRAM_SIZE_64KB :
                                                  TG3_NVRAM_SIZE_256KB);
                        else
                                tp->nvram_size = (protect ?
                                                  TG3_NVRAM_SIZE_128KB :
                                                  TG3_NVRAM_SIZE_512KB);
                        break;
        }
}
10164
/* Decode NVRAM_CFG1 on 5787: set the JEDEC vendor, buffered/flash
 * flags, and page size for the attached EEPROM or flash part.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                        /* EEPROM interface: disable compat bypass. */
                        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                        tw32(NVRAM_CFG1, nvcfg1);
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }
}
10202
/* Decode NVRAM_CFG1 on 5761: set the JEDEC vendor, buffered/flash
 * flags, and page size, then determine the total size — from the
 * NVRAM_ADDR_LOCKOUT register when TPM protection is active, otherwise
 * from the vendor-strap part identity.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
                        tp->nvram_pagesize = 256;
                        break;
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }

        if (protect) {
                /* Protected region boundary doubles as usable size. */
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                switch (nvcfg1) {
                        case FLASH_5761VENDOR_ATMEL_ADB161D:
                        case FLASH_5761VENDOR_ATMEL_MDB161D:
                        case FLASH_5761VENDOR_ST_A_M45PE16:
                        case FLASH_5761VENDOR_ST_M_M45PE16:
                                tp->nvram_size = TG3_NVRAM_SIZE_2MB;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB081D:
                        case FLASH_5761VENDOR_ATMEL_MDB081D:
                        case FLASH_5761VENDOR_ST_A_M45PE80:
                        case FLASH_5761VENDOR_ST_M_M45PE80:
                                tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB041D:
                        case FLASH_5761VENDOR_ATMEL_MDB041D:
                        case FLASH_5761VENDOR_ST_A_M45PE40:
                        case FLASH_5761VENDOR_ST_M_M45PE40:
                                tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB021D:
                        case FLASH_5761VENDOR_ATMEL_MDB021D:
                        case FLASH_5761VENDOR_ST_A_M45PE20:
                        case FLASH_5761VENDOR_ST_M_M45PE20:
                                tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                                break;
                }
        }
}
10277
10278 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10279 {
10280         tp->nvram_jedecnum = JEDEC_ATMEL;
10281         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10282         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10283 }
10284
10285 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10286 static void __devinit tg3_nvram_init(struct tg3 *tp)
10287 {
10288         tw32_f(GRC_EEPROM_ADDR,
10289              (EEPROM_ADDR_FSM_RESET |
10290               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10291                EEPROM_ADDR_CLKPERD_SHIFT)));
10292
10293         msleep(1);
10294
10295         /* Enable seeprom accesses. */
10296         tw32_f(GRC_LOCAL_CTRL,
10297              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10298         udelay(100);
10299
10300         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10301             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10302                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10303
10304                 if (tg3_nvram_lock(tp)) {
10305                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10306                                "tg3_nvram_init failed.\n", tp->dev->name);
10307                         return;
10308                 }
10309                 tg3_enable_nvram_access(tp);
10310
10311                 tp->nvram_size = 0;
10312
10313                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10314                         tg3_get_5752_nvram_info(tp);
10315                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10316                         tg3_get_5755_nvram_info(tp);
10317                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10318                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10319                         tg3_get_5787_nvram_info(tp);
10320                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10321                         tg3_get_5761_nvram_info(tp);
10322                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10323                         tg3_get_5906_nvram_info(tp);
10324                 else
10325                         tg3_get_nvram_info(tp);
10326
10327                 if (tp->nvram_size == 0)
10328                         tg3_get_nvram_size(tp);
10329
10330                 tg3_disable_nvram_access(tp);
10331                 tg3_nvram_unlock(tp);
10332
10333         } else {
10334                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10335
10336                 tg3_get_eeprom_size(tp);
10337         }
10338 }
10339
10340 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10341                                         u32 offset, u32 *val)
10342 {
10343         u32 tmp;
10344         int i;
10345
10346         if (offset > EEPROM_ADDR_ADDR_MASK ||
10347             (offset % 4) != 0)
10348                 return -EINVAL;
10349
10350         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10351                                         EEPROM_ADDR_DEVID_MASK |
10352                                         EEPROM_ADDR_READ);
10353         tw32(GRC_EEPROM_ADDR,
10354              tmp |
10355              (0 << EEPROM_ADDR_DEVID_SHIFT) |
10356              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10357               EEPROM_ADDR_ADDR_MASK) |
10358              EEPROM_ADDR_READ | EEPROM_ADDR_START);
10359
10360         for (i = 0; i < 1000; i++) {
10361                 tmp = tr32(GRC_EEPROM_ADDR);
10362
10363                 if (tmp & EEPROM_ADDR_COMPLETE)
10364                         break;
10365                 msleep(1);
10366         }
10367         if (!(tmp & EEPROM_ADDR_COMPLETE))
10368                 return -EBUSY;
10369
10370         *val = tr32(GRC_EEPROM_DATA);
10371         return 0;
10372 }
10373
10374 #define NVRAM_CMD_TIMEOUT 10000
10375
10376 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10377 {
10378         int i;
10379
10380         tw32(NVRAM_CMD, nvram_cmd);
10381         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10382                 udelay(10);
10383                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10384                         udelay(10);
10385                         break;
10386                 }
10387         }
10388         if (i == NVRAM_CMD_TIMEOUT) {
10389                 return -EBUSY;
10390         }
10391         return 0;
10392 }
10393
10394 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10395 {
10396         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10397             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10398             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10399            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10400             (tp->nvram_jedecnum == JEDEC_ATMEL))
10401
10402                 addr = ((addr / tp->nvram_pagesize) <<
10403                         ATMEL_AT45DB0X1B_PAGE_POS) +
10404                        (addr % tp->nvram_pagesize);
10405
10406         return addr;
10407 }
10408
10409 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10410 {
10411         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10412             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10413             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10414            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10415             (tp->nvram_jedecnum == JEDEC_ATMEL))
10416
10417                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10418                         tp->nvram_pagesize) +
10419                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10420
10421         return addr;
10422 }
10423
10424 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10425 {
10426         int ret;
10427
10428         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10429                 return tg3_nvram_read_using_eeprom(tp, offset, val);
10430
10431         offset = tg3_nvram_phys_addr(tp, offset);
10432
10433         if (offset > NVRAM_ADDR_MSK)
10434                 return -EINVAL;
10435
10436         ret = tg3_nvram_lock(tp);
10437         if (ret)
10438                 return ret;
10439
10440         tg3_enable_nvram_access(tp);
10441
10442         tw32(NVRAM_ADDR, offset);
10443         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10444                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10445
10446         if (ret == 0)
10447                 *val = swab32(tr32(NVRAM_RDDATA));
10448
10449         tg3_disable_nvram_access(tp);
10450
10451         tg3_nvram_unlock(tp);
10452
10453         return ret;
10454 }
10455
10456 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10457 {
10458         u32 v;
10459         int res = tg3_nvram_read(tp, offset, &v);
10460         if (!res)
10461                 *val = cpu_to_le32(v);
10462         return res;
10463 }
10464
10465 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10466 {
10467         int err;
10468         u32 tmp;
10469
10470         err = tg3_nvram_read(tp, offset, &tmp);
10471         *val = swab32(tmp);
10472         return err;
10473 }
10474
/* Write 'len' bytes from 'buf' to the serial EEPROM starting at byte
 * offset 'offset', one 32-bit word at a time via the GRC EEPROM state
 * machine.  offset and len must be dword aligned.  Returns 0 on
 * success or -EBUSY if any word write times out (~1s per word).
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__le32 data;

		addr = offset + i;

		/* Buffer bytes are interpreted as a little-endian word. */
		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

		/* Write COMPLETE back before starting the next transaction
		 * (presumably acks/clears the prior completion -- confirm
		 * against the GRC register spec).
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		/* Clear address/devid/read bits, then program the target
		 * address and start a write transaction.
		 */
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll up to ~1s for this word to complete. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
10517
/* offset and length are dword aligned */
/* Write to unbuffered flash using read/modify/erase/program on whole
 * pages: each affected page is read into a bounce buffer, patched with
 * the caller's data, erased, then programmed back a dword at a time.
 * Returns 0 on success, -ENOMEM or the first command/read error.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;	/* bounce buffer holding one full flash page */

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the flash page containing 'offset'. */
		phy_addr = offset & ~pagemask;

		/* Read the entire page so untouched bytes survive the
		 * erase below.
		 */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
						(__le32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* NOTE(review): 'buf' is never advanced even though the
		 * loop can span multiple pages -- later pages would re-copy
		 * from the start of 'buf'.  Verify callers never cross a
		 * page boundary before relying on multi-page writes.
		 */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the page back one dword at a time, tagging the
		 * first and last words for the controller.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));
			/* swab32(le32_to_cpu(data)), actually */
			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always drop write-enable on the way out, even after errors. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
10614
/* offset and length are dword aligned */
/* Write to buffered NVRAM (or plain EEPROM) a dword at a time.  The
 * controller is told where page boundaries fall via NVRAM_CMD_FIRST /
 * NVRAM_CMD_LAST; ST flash parts additionally need a write-enable
 * before each page on older ASICs.  Returns 0 or the first command
 * error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* Buffer bytes are written to flash in buffer order
		 * (treated as a big-endian word).
		 */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* First word of a page, or first word overall. */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		/* Last word of a page ... */
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ... or last word of the whole transfer. */
		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Older ASICs with ST flash need an explicit write-enable
		 * before starting each page.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
10668
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point: lifts write protection, takes the
 * NVRAM lock, enables write mode, then dispatches to the buffered or
 * unbuffered implementation.  Protection and lock state are restored
 * on every exit path except the early lock failure.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Deassert GPIO output 1 first -- presumably the board's external
	 * write-protect line (restored below); confirm against board docs.
	 */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		/* No NVRAM interface: use the slow EEPROM state machine. */
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Enable NVRAM writes in GRC mode for the duration. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Re-assert write protection. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
10723
/* Maps a PCI subsystem vendor/device pair to the PHY id known to be on
 * that board design; consulted when the PHY id can't be read directly.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
10728
/* Known board subsystem IDs and their PHY ids, searched linearly by
 * lookup_by_subsys().  Entries with phy_id 0 presumably have no fixed
 * copper PHY (e.g. fiber boards) -- verify at the use sites.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10766
10767 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10768 {
10769         int i;
10770
10771         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10772                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10773                      tp->pdev->subsystem_vendor) &&
10774                     (subsys_id_to_phy_id[i].subsys_devid ==
10775                      tp->pdev->subsystem_device))
10776                         return &subsys_id_to_phy_id[i];
10777         }
10778         return NULL;
10779 }
10780
10781 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10782 {
10783         u32 val;
10784         u16 pmcsr;
10785
10786         /* On some early chips the SRAM cannot be accessed in D3hot state,
10787          * so need make sure we're in D0.
10788          */
10789         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10790         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10791         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10792         msleep(1);
10793
10794         /* Make sure register accesses (indirect or otherwise)
10795          * will function correctly.
10796          */
10797         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10798                                tp->misc_host_ctrl);
10799
10800         /* The memory arbiter has to be enabled in order for SRAM accesses
10801          * to succeed.  Normally on powerup the tg3 chip firmware will make
10802          * sure it is enabled, but other entities such as system netboot
10803          * code might disable it.
10804          */
10805         val = tr32(MEMARB_MODE);
10806         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10807
10808         tp->phy_id = PHY_ID_INVALID;
10809         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10810
10811         /* Assume an onboard device and WOL capable by default.  */
10812         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10813
10814         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10815                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10816                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10817                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10818                 }
10819                 val = tr32(VCPU_CFGSHDW);
10820                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10821                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10822                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10823                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10824                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10825                 return;
10826         }
10827
10828         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10829         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10830                 u32 nic_cfg, led_cfg;
10831                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10832                 int eeprom_phy_serdes = 0;
10833
10834                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10835                 tp->nic_sram_data_cfg = nic_cfg;
10836
10837                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10838                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10839                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10840                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10841                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10842                     (ver > 0) && (ver < 0x100))
10843                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10844
10845                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10846                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10847                         eeprom_phy_serdes = 1;
10848
10849                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10850                 if (nic_phy_id != 0) {
10851                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10852                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10853
10854                         eeprom_phy_id  = (id1 >> 16) << 10;
10855                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10856                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10857                 } else
10858                         eeprom_phy_id = 0;
10859
10860                 tp->phy_id = eeprom_phy_id;
10861                 if (eeprom_phy_serdes) {
10862                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10863                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10864                         else
10865                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10866                 }
10867
10868                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10869                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10870                                     SHASTA_EXT_LED_MODE_MASK);
10871                 else
10872                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10873
10874                 switch (led_cfg) {
10875                 default:
10876                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10877                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10878                         break;
10879
10880                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10881                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10882                         break;
10883
10884                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10885                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10886
10887                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10888                          * read on some older 5700/5701 bootcode.
10889                          */
10890                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10891                             ASIC_REV_5700 ||
10892                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10893                             ASIC_REV_5701)
10894                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10895
10896                         break;
10897
10898                 case SHASTA_EXT_LED_SHARED:
10899                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10900                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10901                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10902                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10903                                                  LED_CTRL_MODE_PHY_2);
10904                         break;
10905
10906                 case SHASTA_EXT_LED_MAC:
10907                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10908                         break;
10909
10910                 case SHASTA_EXT_LED_COMBO:
10911                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10912                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10913                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10914                                                  LED_CTRL_MODE_PHY_2);
10915                         break;
10916
10917                 };
10918
10919                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10920                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10921                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10922                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10923
10924                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
10925                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10926
10927                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10928                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10929                         if ((tp->pdev->subsystem_vendor ==
10930                              PCI_VENDOR_ID_ARIMA) &&
10931                             (tp->pdev->subsystem_device == 0x205a ||
10932                              tp->pdev->subsystem_device == 0x2063))
10933                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10934                 } else {
10935                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10936                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10937                 }
10938
10939                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10940                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10941                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10942                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10943                 }
10944                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10945                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10946                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10947                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10948                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10949
10950                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10951                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10952                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10953
10954                 if (cfg2 & (1 << 17))
10955                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10956
10957                 /* serdes signal pre-emphasis in register 0x590 set by */
10958                 /* bootcode if bit 18 is set */
10959                 if (cfg2 & (1 << 18))
10960                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10961
10962                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10963                         u32 cfg3;
10964
10965                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10966                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10967                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10968                 }
10969         }
10970 }
10971
10972 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
10973 {
10974         int i;
10975         u32 val;
10976
10977         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
10978         tw32(OTP_CTRL, cmd);
10979
10980         /* Wait for up to 1 ms for command to execute. */
10981         for (i = 0; i < 100; i++) {
10982                 val = tr32(OTP_STATUS);
10983                 if (val & OTP_STATUS_CMD_DONE)
10984                         break;
10985                 udelay(10);
10986         }
10987
10988         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
10989 }
10990
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 *
 * Returns 0 on any OTP command failure; callers must treat 0 as
 * "no valid OTP configuration".
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register interface. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	/* First read: the word holding the top half of the config. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	/* Second read: the word holding the bottom half. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge: low 16 bits of the first word become the high half of
	 * the result, high 16 bits of the second word the low half. */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
11020
/* Determine which PHY is attached and record its ID in tp->phy_id,
 * setting the SERDES flags accordingly.  When no management firmware
 * (ASF/APE) owns the PHY and the device is copper, also reset the PHY
 * and make sure full autonegotiation advertisement is programmed.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into tg3's internal phy_id
		 * layout (assumed to match the PHY_ID_* constants — confirm
		 * against tg3.h). */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		/* BCM8002 is the only table entry treated as a SERDES PHY. */
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* phy_id of 0 denotes a SERDES device in the table. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Only touch the PHY when no management firmware owns it and the
	 * device is copper. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR link status is latched; read twice so the second
		 * read reflects the current state.  If link is already up,
		 * skip the reset to avoid dropping it. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		/* Advertise all 10/100 modes plus pause. */
		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 revisions must run as link master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		/* If the PHY is not already advertising everything we want,
		 * rewrite the advertisement registers and restart autoneg. */
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* Re-write the advertisement after wirespeed setup. */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): when err == 0 this repeats the 5401 DSP init just
	 * performed above — looks redundant; confirm whether the double
	 * initialization is intentional. */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* Fiber/SERDES devices advertise gigabit only. */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11148
/* Read the board part number out of the VPD area (either from the tg3
 * EEPROM image or via the PCI VPD capability) into
 * tp->board_part_number, falling back to a fixed string when the VPD
 * block is absent or malformed.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* tg3 EEPROM layout: VPD lives at fixed offset 0x100. */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			/* Unpack each 32-bit word byte-wise, LSB first. */
			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		/* No tg3 EEPROM signature: fetch the VPD through the PCI
		 * VPD capability instead. */
		int vpd_cap;

		/* NOTE(review): pci_find_capability() may return 0 here and
		 * the config accesses below do not check for that — verify
		 * all devices taking this path expose a VPD capability. */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			/* Write the VPD address, then poll the completion
			 * flag (bit 15 of the address register), sleeping
			 * 1 ms between polls, up to ~100 ms. */
			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Skip identifier-string (0x82) and read-write (0x91)
		 * resources; a 16-bit little-endian length follows the
		 * tag byte. */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Anything other than a read-only resource (0x90) means
		 * the VPD is not in the expected shape. */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword entries: 2-byte keyword, 1-byte length,
		 * then data.  We want "PN" (part number). */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				/* NOTE(review): the limit 24 presumably
				 * matches sizeof(tp->board_part_number) —
				 * confirm against tg3.h. */
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
11249
11250 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11251 {
11252         u32 val;
11253
11254         if (tg3_nvram_read_swab(tp, offset, &val) ||
11255             (val & 0xfc000000) != 0x0c000000 ||
11256             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11257             val != 0)
11258                 return 0;
11259
11260         return 1;
11261 }
11262
/* Extract the bootcode version string (and, when ASF management
 * firmware is active, the ASF initialization firmware version) from
 * NVRAM into tp->fw_ver.
 *
 * Silently returns on any NVRAM read failure or signature mismatch,
 * leaving tp->fw_ver in whatever state it had reached.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	/* Word 0 must carry the EEPROM magic or the layout is unknown. */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc: firmware image offset; word 0x4: image load address. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	/* Word 8 of a valid image holds the version-string address. */
	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* ver_offset is relative to the image load address (start). */
	offset = offset + ver_offset - start;
	/* Copy up to 16 bytes of version text into fw_ver. */
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* Only append the ASF firmware version when ASF is enabled and
	 * the APE is not managing the device. */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Scan the NVRAM directory for the ASF INI entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 chips use a fixed load address; newer ones store it in
	 * the word preceding the directory entry. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	/* Directory entry word 4 points at the ASF image; word 8 of that
	 * image holds its version-string address. */
	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	/* Append ", " plus up to four 32-bit words of ASF version text. */
	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Copy only what still fits; the final byte is reserved
		 * for the NUL terminator written below. */
		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11346
11347 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11348
11349 static int __devinit tg3_get_invariants(struct tg3 *tp)
11350 {
11351         static struct pci_device_id write_reorder_chipsets[] = {
11352                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11353                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11354                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11355                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11356                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11357                              PCI_DEVICE_ID_VIA_8385_0) },
11358                 { },
11359         };
11360         u32 misc_ctrl_reg;
11361         u32 cacheline_sz_reg;
11362         u32 pci_state_reg, grc_misc_cfg;
11363         u32 val;
11364         u16 pci_cmd;
11365         int err, pcie_cap;
11366
11367         /* Force memory write invalidate off.  If we leave it on,
11368          * then on 5700_BX chips we have to enable a workaround.
11369          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11370          * to match the cacheline size.  The Broadcom driver have this
11371          * workaround but turns MWI off all the times so never uses
11372          * it.  This seems to suggest that the workaround is insufficient.
11373          */
11374         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11375         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11376         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11377
11378         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11379          * has the register indirect write enable bit set before
11380          * we try to access any of the MMIO registers.  It is also
11381          * critical that the PCI-X hw workaround situation is decided
11382          * before that as well.
11383          */
11384         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11385                               &misc_ctrl_reg);
11386
11387         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11388                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11389         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11390                 u32 prod_id_asic_rev;
11391
11392                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11393                                       &prod_id_asic_rev);
11394                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11395         }
11396
11397         /* Wrong chip ID in 5752 A0. This code can be removed later
11398          * as A0 is not in production.
11399          */
11400         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11401                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11402
11403         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11404          * we need to disable memory and use config. cycles
11405          * only to access all registers. The 5702/03 chips
11406          * can mistakenly decode the special cycles from the
11407          * ICH chipsets as memory write cycles, causing corruption
11408          * of register and memory space. Only certain ICH bridges
11409          * will drive special cycles with non-zero data during the
11410          * address phase which can fall within the 5703's address
11411          * range. This is not an ICH bug as the PCI spec allows
11412          * non-zero address during special cycles. However, only
11413          * these ICH bridges are known to drive non-zero addresses
11414          * during special cycles.
11415          *
11416          * Since special cycles do not cross PCI bridges, we only
11417          * enable this workaround if the 5703 is on the secondary
11418          * bus of these ICH bridges.
11419          */
11420         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11421             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11422                 static struct tg3_dev_id {
11423                         u32     vendor;
11424                         u32     device;
11425                         u32     rev;
11426                 } ich_chipsets[] = {
11427                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11428                           PCI_ANY_ID },
11429                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11430                           PCI_ANY_ID },
11431                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11432                           0xa },
11433                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11434                           PCI_ANY_ID },
11435                         { },
11436                 };
11437                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11438                 struct pci_dev *bridge = NULL;
11439
11440                 while (pci_id->vendor != 0) {
11441                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11442                                                 bridge);
11443                         if (!bridge) {
11444                                 pci_id++;
11445                                 continue;
11446                         }
11447                         if (pci_id->rev != PCI_ANY_ID) {
11448                                 if (bridge->revision > pci_id->rev)
11449                                         continue;
11450                         }
11451                         if (bridge->subordinate &&
11452                             (bridge->subordinate->number ==
11453                              tp->pdev->bus->number)) {
11454
11455                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11456                                 pci_dev_put(bridge);
11457                                 break;
11458                         }
11459                 }
11460         }
11461
11462         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11463                 static struct tg3_dev_id {
11464                         u32     vendor;
11465                         u32     device;
11466                 } bridge_chipsets[] = {
11467                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11468                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11469                         { },
11470                 };
11471                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11472                 struct pci_dev *bridge = NULL;
11473
11474                 while (pci_id->vendor != 0) {
11475                         bridge = pci_get_device(pci_id->vendor,
11476                                                 pci_id->device,
11477                                                 bridge);
11478                         if (!bridge) {
11479                                 pci_id++;
11480                                 continue;
11481                         }
11482                         if (bridge->subordinate &&
11483                             (bridge->subordinate->number <=
11484                              tp->pdev->bus->number) &&
11485                             (bridge->subordinate->subordinate >=
11486                              tp->pdev->bus->number)) {
11487                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11488                                 pci_dev_put(bridge);
11489                                 break;
11490                         }
11491                 }
11492         }
11493
11494         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11495          * DMA addresses > 40-bit. This bridge may have other additional
11496          * 57xx devices behind it in some 4-port NIC designs for example.
11497          * Any tg3 device found behind the bridge will also need the 40-bit
11498          * DMA workaround.
11499          */
11500         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11501             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11502                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11503                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11504                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11505         }
11506         else {
11507                 struct pci_dev *bridge = NULL;
11508
11509                 do {
11510                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11511                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11512                                                 bridge);
11513                         if (bridge && bridge->subordinate &&
11514                             (bridge->subordinate->number <=
11515                              tp->pdev->bus->number) &&
11516                             (bridge->subordinate->subordinate >=
11517                              tp->pdev->bus->number)) {
11518                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11519                                 pci_dev_put(bridge);
11520                                 break;
11521                         }
11522                 } while (bridge);
11523         }
11524
11525         /* Initialize misc host control in PCI block. */
11526         tp->misc_host_ctrl |= (misc_ctrl_reg &
11527                                MISC_HOST_CTRL_CHIPREV);
11528         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11529                                tp->misc_host_ctrl);
11530
11531         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11532                               &cacheline_sz_reg);
11533
11534         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11535         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11536         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11537         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11538
11539         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11540             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11541                 tp->pdev_peer = tg3_find_peer(tp);
11542
11543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11544             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11545             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11546             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11547             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11548             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11549             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11550             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11551                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11552
11553         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11554             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11555                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11556
11557         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11558                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11559                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11560                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11561                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11562                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11563                      tp->pdev_peer == tp->pdev))
11564                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11565
11566                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11567                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11568                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11569                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11570                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11571                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11572                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11573                 } else {
11574                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11575                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11576                                 ASIC_REV_5750 &&
11577                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11578                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11579                 }
11580         }
11581
11582         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11583             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11584             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11585             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11586             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11587             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11588             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11589             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11590                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11591
11592         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11593         if (pcie_cap != 0) {
11594                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11595
11596                 pcie_set_readrq(tp->pdev, 4096);
11597
11598                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11599                         u16 lnkctl;
11600
11601                         pci_read_config_word(tp->pdev,
11602                                              pcie_cap + PCI_EXP_LNKCTL,
11603                                              &lnkctl);
11604                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11605                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11606                 }
11607         }
11608
11609         /* If we have an AMD 762 or VIA K8T800 chipset, write
11610          * reordering to the mailbox registers done by the host
11611          * controller can cause major troubles.  We read back from
11612          * every mailbox register write to force the writes to be
11613          * posted to the chip in order.
11614          */
11615         if (pci_dev_present(write_reorder_chipsets) &&
11616             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11617                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11618
11619         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11620             tp->pci_lat_timer < 64) {
11621                 tp->pci_lat_timer = 64;
11622
11623                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11624                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11625                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11626                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11627
11628                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11629                                        cacheline_sz_reg);
11630         }
11631
11632         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11633             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11634                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11635                 if (!tp->pcix_cap) {
11636                         printk(KERN_ERR PFX "Cannot find PCI-X "
11637                                             "capability, aborting.\n");
11638                         return -EIO;
11639                 }
11640         }
11641
11642         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11643                               &pci_state_reg);
11644
11645         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11646                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11647
11648                 /* If this is a 5700 BX chipset, and we are in PCI-X
11649                  * mode, enable register write workaround.
11650                  *
11651                  * The workaround is to use indirect register accesses
11652                  * for all chip writes not to mailbox registers.
11653                  */
11654                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11655                         u32 pm_reg;
11656
11657                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11658
11659                         /* The chip can have it's power management PCI config
11660                          * space registers clobbered due to this bug.
11661                          * So explicitly force the chip into D0 here.
11662                          */
11663                         pci_read_config_dword(tp->pdev,
11664                                               tp->pm_cap + PCI_PM_CTRL,
11665                                               &pm_reg);
11666                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11667                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11668                         pci_write_config_dword(tp->pdev,
11669                                                tp->pm_cap + PCI_PM_CTRL,
11670                                                pm_reg);
11671
11672                         /* Also, force SERR#/PERR# in PCI command. */
11673                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11674                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11675                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11676                 }
11677         }
11678
11679         /* 5700 BX chips need to have their TX producer index mailboxes
11680          * written twice to workaround a bug.
11681          */
11682         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11683                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11684
11685         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11686                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11687         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11688                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11689
11690         /* Chip-specific fixup from Broadcom driver */
11691         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11692             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11693                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11694                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11695         }
11696
11697         /* Default fast path register access methods */
11698         tp->read32 = tg3_read32;
11699         tp->write32 = tg3_write32;
11700         tp->read32_mbox = tg3_read32;
11701         tp->write32_mbox = tg3_write32;
11702         tp->write32_tx_mbox = tg3_write32;
11703         tp->write32_rx_mbox = tg3_write32;
11704
11705         /* Various workaround register access methods */
11706         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11707                 tp->write32 = tg3_write_indirect_reg32;
11708         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11709                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11710                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11711                 /*
11712                  * Back to back register writes can cause problems on these
11713                  * chips, the workaround is to read back all reg writes
11714                  * except those to mailbox regs.
11715                  *
11716                  * See tg3_write_indirect_reg32().
11717                  */
11718                 tp->write32 = tg3_write_flush_reg32;
11719         }
11720
11721
11722         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11723             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11724                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11725                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11726                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11727         }
11728
11729         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11730                 tp->read32 = tg3_read_indirect_reg32;
11731                 tp->write32 = tg3_write_indirect_reg32;
11732                 tp->read32_mbox = tg3_read_indirect_mbox;
11733                 tp->write32_mbox = tg3_write_indirect_mbox;
11734                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11735                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11736
11737                 iounmap(tp->regs);
11738                 tp->regs = NULL;
11739
11740                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11741                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11742                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11743         }
11744         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11745                 tp->read32_mbox = tg3_read32_mbox_5906;
11746                 tp->write32_mbox = tg3_write32_mbox_5906;
11747                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11748                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11749         }
11750
11751         if (tp->write32 == tg3_write_indirect_reg32 ||
11752             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11753              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11754               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11755                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11756
11757         /* Get eeprom hw config before calling tg3_set_power_state().
11758          * In particular, the TG3_FLG2_IS_NIC flag must be
11759          * determined before calling tg3_set_power_state() so that
11760          * we know whether or not to switch out of Vaux power.
11761          * When the flag is set, it means that GPIO1 is used for eeprom
11762          * write protect and also implies that it is a LOM where GPIOs
11763          * are not used to switch power.
11764          */
11765         tg3_get_eeprom_hw_cfg(tp);
11766
11767         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11768                 /* Allow reads and writes to the
11769                  * APE register and memory space.
11770                  */
11771                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11772                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11773                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11774                                        pci_state_reg);
11775         }
11776
11777         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11778             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11779                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11780
11781                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11782                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11783                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11784                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11785                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11786         }
11787
11788         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11789          * GPIO1 driven high will bring 5700's external PHY out of reset.
11790          * It is also used as eeprom write protect on LOMs.
11791          */
11792         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11793         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11794             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11795                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11796                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11797         /* Unused GPIO3 must be driven as output on 5752 because there
11798          * are no pull-up resistors on unused GPIO pins.
11799          */
11800         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11801                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11802
11803         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11804                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11805
11806         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
11807                 /* Turn off the debug UART. */
11808                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11809                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
11810                         /* Keep VMain power. */
11811                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
11812                                               GRC_LCLCTRL_GPIO_OUTPUT0;
11813         }
11814
11815         /* Force the chip into D0. */
11816         err = tg3_set_power_state(tp, PCI_D0);
11817         if (err) {
11818                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11819                        pci_name(tp->pdev));
11820                 return err;
11821         }
11822
11823         /* 5700 B0 chips do not support checksumming correctly due
11824          * to hardware bugs.
11825          */
11826         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11827                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11828
11829         /* Derive initial jumbo mode from MTU assigned in
11830          * ether_setup() via the alloc_etherdev() call
11831          */
11832         if (tp->dev->mtu > ETH_DATA_LEN &&
11833             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11834                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11835
11836         /* Determine WakeOnLan speed to use. */
11837         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11838             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11839             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11840             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11841                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11842         } else {
11843                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11844         }
11845
11846         /* A few boards don't want Ethernet@WireSpeed phy feature */
11847         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11848             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11849              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11850              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11851             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11852             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11853                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11854
11855         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11856             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11857                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11858         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11859                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11860
11861         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11862                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11863                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11864                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11865                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11866                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11867                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11868                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11869                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11870                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11871                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11872                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11873         }
11874
11875         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11876             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
11877                 tp->phy_otp = tg3_read_otp_phycfg(tp);
11878                 if (tp->phy_otp == 0)
11879                         tp->phy_otp = TG3_OTP_DEFAULT;
11880         }
11881
11882         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11883             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11884                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
11885         else
11886                 tp->mi_mode = MAC_MI_MODE_BASE;
11887
11888         tp->coalesce_mode = 0;
11889         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11890             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11891                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11892
11893         /* Initialize MAC MI mode, polling disabled. */
11894         tw32_f(MAC_MI_MODE, tp->mi_mode);
11895         udelay(80);
11896
11897         /* Initialize data/descriptor byte/word swapping. */
11898         val = tr32(GRC_MODE);
11899         val &= GRC_MODE_HOST_STACKUP;
11900         tw32(GRC_MODE, val | tp->grc_mode);
11901
11902         tg3_switch_clocks(tp);
11903
11904         /* Clear this out for sanity. */
11905         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11906
11907         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11908                               &pci_state_reg);
11909         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11910             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11911                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11912
11913                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11914                     chiprevid == CHIPREV_ID_5701_B0 ||
11915                     chiprevid == CHIPREV_ID_5701_B2 ||
11916                     chiprevid == CHIPREV_ID_5701_B5) {
11917                         void __iomem *sram_base;
11918
11919                         /* Write some dummy words into the SRAM status block
11920                          * area, see if it reads back correctly.  If the return
11921                          * value is bad, force enable the PCIX workaround.
11922                          */
11923                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11924
11925                         writel(0x00000000, sram_base);
11926                         writel(0x00000000, sram_base + 4);
11927                         writel(0xffffffff, sram_base + 4);
11928                         if (readl(sram_base) != 0x00000000)
11929                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11930                 }
11931         }
11932
11933         udelay(50);
11934         tg3_nvram_init(tp);
11935
11936         grc_misc_cfg = tr32(GRC_MISC_CFG);
11937         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11938
11939         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11940             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11941              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11942                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11943
11944         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11945             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11946                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11947         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11948                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11949                                       HOSTCC_MODE_CLRTICK_TXBD);
11950
11951                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11952                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11953                                        tp->misc_host_ctrl);
11954         }
11955
11956         /* these are limited to 10/100 only */
11957         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11958              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11959             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11960              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11961              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11962               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11963               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11964             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11965              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11966               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11967               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11968             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11969                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11970
11971         err = tg3_phy_probe(tp);
11972         if (err) {
11973                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11974                        pci_name(tp->pdev), err);
11975                 /* ... but do not return immediately ... */
11976         }
11977
11978         tg3_read_partno(tp);
11979         tg3_read_fw_ver(tp);
11980
11981         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11982                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11983         } else {
11984                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11985                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11986                 else
11987                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11988         }
11989
11990         /* 5700 {AX,BX} chips have a broken status block link
11991          * change bit implementation, so we must use the
11992          * status register in those cases.
11993          */
11994         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11995                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11996         else
11997                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11998
11999         /* The led_ctrl is set during tg3_phy_probe, here we might
12000          * have to force the link status polling mechanism based
12001          * upon subsystem IDs.
12002          */
12003         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12004             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12005             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12006                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12007                                   TG3_FLAG_USE_LINKCHG_REG);
12008         }
12009
12010         /* For all SERDES we poll the MAC status register. */
12011         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12012                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12013         else
12014                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12015
12016         /* All chips before 5787 can get confused if TX buffers
12017          * straddle the 4GB address boundary in some cases.
12018          */
12019         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12020             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12021             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12022             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12023             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12024                 tp->dev->hard_start_xmit = tg3_start_xmit;
12025         else
12026                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12027
12028         tp->rx_offset = 2;
12029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12030             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12031                 tp->rx_offset = 0;
12032
12033         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12034
12035         /* Increment the rx prod index on the rx std ring by at most
12036          * 8 for these chips to workaround hw errata.
12037          */
12038         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12039             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12040             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12041                 tp->rx_std_max_post = 8;
12042
12043         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12044                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12045                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12046
12047         return err;
12048 }
12049
12050 #ifdef CONFIG_SPARC
12051 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12052 {
12053         struct net_device *dev = tp->dev;
12054         struct pci_dev *pdev = tp->pdev;
12055         struct device_node *dp = pci_device_to_OF_node(pdev);
12056         const unsigned char *addr;
12057         int len;
12058
12059         addr = of_get_property(dp, "local-mac-address", &len);
12060         if (addr && len == 6) {
12061                 memcpy(dev->dev_addr, addr, 6);
12062                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12063                 return 0;
12064         }
12065         return -ENODEV;
12066 }
12067
12068 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12069 {
12070         struct net_device *dev = tp->dev;
12071
12072         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12073         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12074         return 0;
12075 }
12076 #endif
12077
/* Determine the device's MAC address, trying sources in decreasing
 * order of trust: SPARC OF property, bootcode MAC-address mailbox in
 * NIC SRAM, NVRAM at a chip-specific offset, and finally the live
 * MAC_ADDR_0 registers.  Returns 0 on success, -EINVAL if no source
 * yields a valid (non-zero, non-multicast) address.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address. */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC parts: the second function's address lives at a
		 * different NVRAM offset.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM arbitration lock cannot be obtained, reset
		 * the NVRAM command engine instead of unlocking.
		 * NOTE(review): presumably this recovers a lock held by a
		 * wedged bootcode on the other port — confirm.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is the "HK" signature the bootcode stores in the top
	 * half of the high word when the mailbox is valid.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		/* Note the byte order here differs from the SRAM path:
		 * it matches how tg3_nvram_read returns the words.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		/* SPARC can still fall back to the IDPROM address. */
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12152
12153 #define BOUNDARY_SINGLE_CACHELINE       1
12154 #define BOUNDARY_MULTI_CACHELINE        2
12155
12156 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12157 {
12158         int cacheline_size;
12159         u8 byte;
12160         int goal;
12161
12162         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12163         if (byte == 0)
12164                 cacheline_size = 1024;
12165         else
12166                 cacheline_size = (int) byte * 4;
12167
12168         /* On 5703 and later chips, the boundary bits have no
12169          * effect.
12170          */
12171         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12172             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12173             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12174                 goto out;
12175
12176 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12177         goal = BOUNDARY_MULTI_CACHELINE;
12178 #else
12179 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12180         goal = BOUNDARY_SINGLE_CACHELINE;
12181 #else
12182         goal = 0;
12183 #endif
12184 #endif
12185
12186         if (!goal)
12187                 goto out;
12188
12189         /* PCI controllers on most RISC systems tend to disconnect
12190          * when a device tries to burst across a cache-line boundary.
12191          * Therefore, letting tg3 do so just wastes PCI bandwidth.
12192          *
12193          * Unfortunately, for PCI-E there are only limited
12194          * write-side controls for this, and thus for reads
12195          * we will still get the disconnects.  We'll also waste
12196          * these PCI cycles for both read and write for chips
12197          * other than 5700 and 5701 which do not implement the
12198          * boundary bits.
12199          */
12200         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12201             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12202                 switch (cacheline_size) {
12203                 case 16:
12204                 case 32:
12205                 case 64:
12206                 case 128:
12207                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12208                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12209                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12210                         } else {
12211                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12212                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12213                         }
12214                         break;
12215
12216                 case 256:
12217                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12218                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12219                         break;
12220
12221                 default:
12222                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12223                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12224                         break;
12225                 };
12226         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12227                 switch (cacheline_size) {
12228                 case 16:
12229                 case 32:
12230                 case 64:
12231                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12232                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12233                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12234                                 break;
12235                         }
12236                         /* fallthrough */
12237                 case 128:
12238                 default:
12239                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12240                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12241                         break;
12242                 };
12243         } else {
12244                 switch (cacheline_size) {
12245                 case 16:
12246                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12247                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12248                                         DMA_RWCTRL_WRITE_BNDRY_16);
12249                                 break;
12250                         }
12251                         /* fallthrough */
12252                 case 32:
12253                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12254                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12255                                         DMA_RWCTRL_WRITE_BNDRY_32);
12256                                 break;
12257                         }
12258                         /* fallthrough */
12259                 case 64:
12260                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12261                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12262                                         DMA_RWCTRL_WRITE_BNDRY_64);
12263                                 break;
12264                         }
12265                         /* fallthrough */
12266                 case 128:
12267                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12268                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12269                                         DMA_RWCTRL_WRITE_BNDRY_128);
12270                                 break;
12271                         }
12272                         /* fallthrough */
12273                 case 256:
12274                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
12275                                 DMA_RWCTRL_WRITE_BNDRY_256);
12276                         break;
12277                 case 512:
12278                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
12279                                 DMA_RWCTRL_WRITE_BNDRY_512);
12280                         break;
12281                 case 1024:
12282                 default:
12283                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12284                                 DMA_RWCTRL_WRITE_BNDRY_1024);
12285                         break;
12286                 };
12287         }
12288
12289 out:
12290         return val;
12291 }
12292
/* Run one host<->NIC DMA transaction of @size bytes against the buffer
 * at @buf_dma, using an internal buffer descriptor placed in NIC SRAM
 * via PCI config-space memory-window accesses.  @to_device selects the
 * direction (non-zero = read DMA, host memory -> chip).  Returns 0 if
 * the completion FIFO reports the descriptor within ~4ms, else -ENODEV.
 * Used at probe time to validate DMA_RWCTRL settings.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear both completion FIFOs and the DMA engine status before
	 * starting, so the poll below only sees this transaction.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the internal buffer descriptor pointing at the test
	 * buffer.  nic_mbuf is the SRAM mbuf address the engine uses;
	 * NOTE(review): 0x00002100 appears to be a fixed scratch mbuf —
	 * confirm against chip documentation.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Completion/source queue IDs for the read-DMA path. */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* Completion/source queue IDs for the write-DMA path. */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM word by word through the
	 * PCI config-space memory window (usable even when the MMIO
	 * window is unreliable, e.g. ICH workaround mode).
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	/* Restore the window base for sanity. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the transaction by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO for up to 40 * 100us = 4ms. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		/* Low 16 bits echo the completed descriptor's address. */
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
12373
12374 #define TEST_BUFFER_SIZE        0x2000
12375
/* Probe-time DMA configuration and self-test.
 *
 * First computes an initial TG3PCI_DMA_RW_CTRL value from the bus type
 * (PCI Express / plain PCI / PCI-X) and chip revision, then — on the
 * 5700/5701 only — runs a host->NIC->host DMA loopback over a
 * TEST_BUFFER_SIZE scratch buffer to detect the 5700/5701 write-DMA
 * bug, falling back to a 16-byte write boundary if corruption is seen.
 *
 * Returns 0 on success, -ENOMEM if the scratch buffer cannot be
 * allocated, or -ENODEV if corruption persists even with the
 * 16-byte-boundary workaround applied.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command codes; boundary bits are
	 * merged in by tg3_calc_dma_bndry() below.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		/* Conventional PCI: watermarks depend on ASIC rev. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* PCI-X mode. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (boundary bits mean something
	 * different on these revs).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* The loopback test below is only needed to expose the
	 * 5700/5701 write-DMA bug; all other chips are done.
	 */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop at most twice: once with the unrestricted boundary and,
	 * if corruption is detected, once more with the 16-byte
	 * boundary workaround.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: apply the 16-byte
				 * boundary workaround and retry.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
12562
12563 static void __devinit tg3_init_link_config(struct tg3 *tp)
12564 {
12565         tp->link_config.advertising =
12566                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12567                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12568                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12569                  ADVERTISED_Autoneg | ADVERTISED_MII);
12570         tp->link_config.speed = SPEED_INVALID;
12571         tp->link_config.duplex = DUPLEX_INVALID;
12572         tp->link_config.autoneg = AUTONEG_ENABLE;
12573         tp->link_config.active_speed = SPEED_INVALID;
12574         tp->link_config.active_duplex = DUPLEX_INVALID;
12575         tp->link_config.phy_is_low_power = 0;
12576         tp->link_config.orig_speed = SPEED_INVALID;
12577         tp->link_config.orig_duplex = DUPLEX_INVALID;
12578         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12579 }
12580
12581 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12582 {
12583         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12584                 tp->bufmgr_config.mbuf_read_dma_low_water =
12585                         DEFAULT_MB_RDMA_LOW_WATER_5705;
12586                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12587                         DEFAULT_MB_MACRX_LOW_WATER_5705;
12588                 tp->bufmgr_config.mbuf_high_water =
12589                         DEFAULT_MB_HIGH_WATER_5705;
12590                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12591                         tp->bufmgr_config.mbuf_mac_rx_low_water =
12592                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
12593                         tp->bufmgr_config.mbuf_high_water =
12594                                 DEFAULT_MB_HIGH_WATER_5906;
12595                 }
12596
12597                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12598                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12599                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12600                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12601                 tp->bufmgr_config.mbuf_high_water_jumbo =
12602                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12603         } else {
12604                 tp->bufmgr_config.mbuf_read_dma_low_water =
12605                         DEFAULT_MB_RDMA_LOW_WATER;
12606                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12607                         DEFAULT_MB_MACRX_LOW_WATER;
12608                 tp->bufmgr_config.mbuf_high_water =
12609                         DEFAULT_MB_HIGH_WATER;
12610
12611                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12612                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12613                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12614                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12615                 tp->bufmgr_config.mbuf_high_water_jumbo =
12616                         DEFAULT_MB_HIGH_WATER_JUMBO;
12617         }
12618
12619         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12620         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12621 }
12622
12623 static char * __devinit tg3_phy_string(struct tg3 *tp)
12624 {
12625         switch (tp->phy_id & PHY_ID_MASK) {
12626         case PHY_ID_BCM5400:    return "5400";
12627         case PHY_ID_BCM5401:    return "5401";
12628         case PHY_ID_BCM5411:    return "5411";
12629         case PHY_ID_BCM5701:    return "5701";
12630         case PHY_ID_BCM5703:    return "5703";
12631         case PHY_ID_BCM5704:    return "5704";
12632         case PHY_ID_BCM5705:    return "5705";
12633         case PHY_ID_BCM5750:    return "5750";
12634         case PHY_ID_BCM5752:    return "5752";
12635         case PHY_ID_BCM5714:    return "5714";
12636         case PHY_ID_BCM5780:    return "5780";
12637         case PHY_ID_BCM5755:    return "5755";
12638         case PHY_ID_BCM5787:    return "5787";
12639         case PHY_ID_BCM5784:    return "5784";
12640         case PHY_ID_BCM5756:    return "5722/5756";
12641         case PHY_ID_BCM5906:    return "5906";
12642         case PHY_ID_BCM5761:    return "5761";
12643         case PHY_ID_BCM8002:    return "8002/serdes";
12644         case 0:                 return "serdes";
12645         default:                return "unknown";
12646         };
12647 }
12648
12649 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12650 {
12651         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12652                 strcpy(str, "PCI Express");
12653                 return str;
12654         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12655                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12656
12657                 strcpy(str, "PCIX:");
12658
12659                 if ((clock_ctrl == 7) ||
12660                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12661                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12662                         strcat(str, "133MHz");
12663                 else if (clock_ctrl == 0)
12664                         strcat(str, "33MHz");
12665                 else if (clock_ctrl == 2)
12666                         strcat(str, "50MHz");
12667                 else if (clock_ctrl == 4)
12668                         strcat(str, "66MHz");
12669                 else if (clock_ctrl == 6)
12670                         strcat(str, "100MHz");
12671         } else {
12672                 strcpy(str, "PCI:");
12673                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12674                         strcat(str, "66MHz");
12675                 else
12676                         strcat(str, "33MHz");
12677         }
12678         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12679                 strcat(str, ":32-bit");
12680         else
12681                 strcat(str, ":64-bit");
12682         return str;
12683 }
12684
/* Locate the other PCI function of a dual-port device (e.g. 5704).
 *
 * Scans all eight functions of this device's slot and returns the
 * first one that is not tp->pdev itself.  If no sibling is found (a
 * 5704 can be strapped into single-port mode), tp->pdev is returned.
 *
 * The reference taken by pci_get_slot() is dropped before returning;
 * see the comment below for why holding it is unnecessary.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Not a sibling (or empty function): drop the ref.
		 * pci_dev_put(NULL) is a no-op.
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 *
	 * NOTE(review): if the loop falls through with peer == tp->pdev
	 * (our own device sitting at function 7 with no sibling), the
	 * put below drops a reference the loop already released —
	 * presumably benign since the driver core pins tp->pdev, but
	 * worth confirming.
	 */
	pci_dev_put(peer);

	return peer;
}
12712
12713 static void __devinit tg3_init_coal(struct tg3 *tp)
12714 {
12715         struct ethtool_coalesce *ec = &tp->coal;
12716
12717         memset(ec, 0, sizeof(*ec));
12718         ec->cmd = ETHTOOL_GCOALESCE;
12719         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12720         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12721         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12722         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12723         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12724         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12725         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12726         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12727         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12728
12729         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12730                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12731                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12732                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12733                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12734                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12735         }
12736
12737         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12738                 ec->rx_coalesce_usecs_irq = 0;
12739                 ec->tx_coalesce_usecs_irq = 0;
12740                 ec->stats_block_coalesce_usecs = 0;
12741         }
12742 }
12743
/* PCI probe entry point: bring up one Tigon3 device.
 *
 * Sequence: enable the PCI device and claim its regions, map BAR 0,
 * read chip invariants, configure DMA masks, determine TSO/checksum
 * capabilities, fetch the MAC address, optionally map the APE
 * register BAR, run the DMA self-test, and finally register the
 * net_device.  Every failure unwinds through the goto chain at the
 * bottom in reverse acquisition order.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	resource_size_t tg3reg_base;
	unsigned long tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only for the first device probed. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	/* Initialize the private state embedded in the net_device. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Hook up the net_device operations (pre-ndo-ops era API). */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wide mask was rejected. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: hardware TSO chips are always capable;
	 * a list of old/buggy revs (and ASF-enabled parts) are not; the
	 * rest do firmware TSO but carry the TSO_BUG workaround flag.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705 A1 on a slow bus without firmware TSO: shrink the RX
	 * ring to 63 entries.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* APE-equipped chips expose a second register window in BAR 2. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tg3reg_base = pci_resource_start(pdev, 2);
		tg3reg_len = pci_resource_len(pdev, 2);

		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	/* Probe banner: part number, chip rev, PHY, bus, media, MAC. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
	       "(%s) %s Ethernet %s\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")),
	       print_mac(mac, dev->dev_addr));

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwind: release resources in reverse acquisition order. */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
13086
13087 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13088 {
13089         struct net_device *dev = pci_get_drvdata(pdev);
13090
13091         if (dev) {
13092                 struct tg3 *tp = netdev_priv(dev);
13093
13094                 flush_scheduled_work();
13095                 unregister_netdev(dev);
13096                 if (tp->aperegs) {
13097                         iounmap(tp->aperegs);
13098                         tp->aperegs = NULL;
13099                 }
13100                 if (tp->regs) {
13101                         iounmap(tp->regs);
13102                         tp->regs = NULL;
13103                 }
13104                 free_netdev(dev);
13105                 pci_release_regions(pdev);
13106                 pci_disable_device(pdev);
13107                 pci_set_drvdata(pdev, NULL);
13108         }
13109 }
13110
/* tg3_suspend - PCI power-management suspend callback.
 *
 * Quiesces the device before entering a low-power state: stops the
 * deferred work and the periodic timer, disables interrupts, detaches
 * the netdev, halts the chip, and finally programs the requested power
 * state.  If the power-state transition fails, the device is restarted
 * and reattached so it remains usable.
 *
 * Returns 0 on success or a negative errno from tg3_set_power_state().
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        /* PCI register 4 needs to be saved whether netif_running() or not.
         * MSI address and data need to be saved if using MSI and
         * netif_running().
         */
        pci_save_state(pdev);

        if (!netif_running(dev))
                return 0;

        /* Stop any pending reset task and the data path before halting. */
        flush_scheduled_work();
        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        /* irq_sync=1: also synchronize against a running interrupt handler. */
        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
        tg3_full_unlock(tp);

        err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
        if (err) {
                /* Power transition failed: bring the device back up so the
                 * interface stays functional instead of being left halted.
                 */
                tg3_full_lock(tp, 0);

                tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                if (tg3_restart_hw(tp, 1))
                        goto out;

                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);
        }

        return err;
}
13162
13163 static int tg3_resume(struct pci_dev *pdev)
13164 {
13165         struct net_device *dev = pci_get_drvdata(pdev);
13166         struct tg3 *tp = netdev_priv(dev);
13167         int err;
13168
13169         pci_restore_state(tp->pdev);
13170
13171         if (!netif_running(dev))
13172                 return 0;
13173
13174         err = tg3_set_power_state(tp, PCI_D0);
13175         if (err)
13176                 return err;
13177
13178         netif_device_attach(dev);
13179
13180         tg3_full_lock(tp, 0);
13181
13182         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13183         err = tg3_restart_hw(tp, 1);
13184         if (err)
13185                 goto out;
13186
13187         tp->timer.expires = jiffies + tp->timer_offset;
13188         add_timer(&tp->timer);
13189
13190         tg3_netif_start(tp);
13191
13192 out:
13193         tg3_full_unlock(tp);
13194
13195         return err;
13196 }
13197
/* PCI driver glue: probe/remove plus legacy suspend/resume callbacks. */
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .suspend        = tg3_suspend,
        .resume         = tg3_resume
};
13206
/* Module entry point: register the PCI driver with the core. */
static int __init tg3_init(void)
{
        return pci_register_driver(&tg3_driver);
}
13211
/* Module exit point: unregister the PCI driver (triggers remove). */
static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}
13216
/* Wire the init/exit functions into the module load/unload machinery. */
module_init(tg3_init);
module_exit(tg3_cleanup);