/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
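
/* Illustrative example: tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the driver's flag bitmap, so feature checks need no
 * extra locking.
 */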

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     125
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "September 26, 2012"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
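
/* Worked example: TG3_TX_RING_SIZE is 512, so NEXT_TX(511) ==
 * (511 + 1) & 511 == 0; the index wraps around with a single AND,
 * no modulo instruction required.
 */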

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
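/* e.g. with tx_pending == TG3_DEF_TX_RING_PENDING (511), the queue
 * is woken once 511 / 4 == 127 descriptors are free.
 */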
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

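/* Write, then read the register back: the readl() flushes the posted
 * PCI write to the device before this function returns.
 */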
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
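                /* fall through */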
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
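                /* fall through */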
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

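        /* Nonzero means APE never cleared EVENT_PENDING, i.e. we timed out. */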
        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

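/* MDIO access goes through the MAC's MI communication register:
 * compose the PHY address, register number and command into a single
 * MI_COM frame, start it, then poll until MI_COM_BUSY clears.
 */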
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1562         delay_cnt = (delay_cnt >> 3) + 1;
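        /* delay_cnt polls of udelay(8) cover roughly the time left, e.g.
         * a full 2500 usec budget yields (2500 >> 3) + 1 = 313 iterations,
         * about 2.5 ms of polling for the firmware's acknowledgement.
         */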
1563
1564         for (i = 0; i < delay_cnt; i++) {
1565                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1566                         break;
1567                 udelay(8);
1568         }
1569 }
1570
1571 /* tp->lock is held. */
1572 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1573 {
1574         u32 reg, val;
1575
1576         val = 0;
1577         if (!tg3_readphy(tp, MII_BMCR, &reg))
1578                 val = reg << 16;
1579         if (!tg3_readphy(tp, MII_BMSR, &reg))
1580                 val |= (reg & 0xffff);
1581         *data++ = val;
1582
1583         val = 0;
1584         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1585                 val = reg << 16;
1586         if (!tg3_readphy(tp, MII_LPA, &reg))
1587                 val |= (reg & 0xffff);
1588         *data++ = val;
1589
1590         val = 0;
1591         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1592                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1593                         val = reg << 16;
1594                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1595                         val |= (reg & 0xffff);
1596         }
1597         *data++ = val;
1598
1599         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1600                 val = reg << 16;
1601         else
1602                 val = 0;
1603         *data++ = val;
1604 }
1605
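/* Push the MII snapshot gathered above to the management firmware: a
 * LINK_UPDATE command, a length word of 14, and four data words, then
 * ring the firmware event doorbell via GRC_RX_CPU_EVENT.
 */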
1606 /* tp->lock is held. */
1607 static void tg3_ump_link_report(struct tg3 *tp)
1608 {
1609         u32 data[4];
1610
1611         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1612                 return;
1613
1614         tg3_phy_gather_ump_data(tp, data);
1615
1616         tg3_wait_for_event_ack(tp);
1617
1618         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1619         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1620         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1621         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1622         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1623         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1624
1625         tg3_generate_fw_event(tp);
1626 }
1627
1628 /* tp->lock is held. */
1629 static void tg3_stop_fw(struct tg3 *tp)
1630 {
1631         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1632                 /* Wait for RX cpu to ACK the previous event. */
1633                 tg3_wait_for_event_ack(tp);
1634
1635                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1636
1637                 tg3_generate_fw_event(tp);
1638
1639                 /* Wait for RX cpu to ACK this event. */
1640                 tg3_wait_for_event_ack(tp);
1641         }
1642 }
1643
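/* The three tg3_write_sig_*() helpers below advertise driver state to
 * management firmware through the DRV_STATE mailbox: the pre-reset,
 * post-reset and legacy signatures map RESET_KIND_* onto DRV_STATE_*
 * values so ASF/APE firmware can track where the driver is in a reset
 * cycle.
 */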
1644 /* tp->lock is held. */
1645 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1646 {
1647         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1648                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1649
1650         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1651                 switch (kind) {
1652                 case RESET_KIND_INIT:
1653                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1654                                       DRV_STATE_START);
1655                         break;
1656
1657                 case RESET_KIND_SHUTDOWN:
1658                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1659                                       DRV_STATE_UNLOAD);
1660                         break;
1661
1662                 case RESET_KIND_SUSPEND:
1663                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1664                                       DRV_STATE_SUSPEND);
1665                         break;
1666
1667                 default:
1668                         break;
1669                 }
1670         }
1671
1672         if (kind == RESET_KIND_INIT ||
1673             kind == RESET_KIND_SUSPEND)
1674                 tg3_ape_driver_state_change(tp, kind);
1675 }
1676
1677 /* tp->lock is held. */
1678 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1679 {
1680         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1681                 switch (kind) {
1682                 case RESET_KIND_INIT:
1683                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1684                                       DRV_STATE_START_DONE);
1685                         break;
1686
1687                 case RESET_KIND_SHUTDOWN:
1688                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1689                                       DRV_STATE_UNLOAD_DONE);
1690                         break;
1691
1692                 default:
1693                         break;
1694                 }
1695         }
1696
1697         if (kind == RESET_KIND_SHUTDOWN)
1698                 tg3_ape_driver_state_change(tp, kind);
1699 }
1700
1701 /* tp->lock is held. */
1702 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1703 {
1704         if (tg3_flag(tp, ENABLE_ASF)) {
1705                 switch (kind) {
1706                 case RESET_KIND_INIT:
1707                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1708                                       DRV_STATE_START);
1709                         break;
1710
1711                 case RESET_KIND_SHUTDOWN:
1712                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1713                                       DRV_STATE_UNLOAD);
1714                         break;
1715
1716                 case RESET_KIND_SUSPEND:
1717                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1718                                       DRV_STATE_SUSPEND);
1719                         break;
1720
1721                 default:
1722                         break;
1723                 }
1724         }
1725 }
1726
1727 static int tg3_poll_fw(struct tg3 *tp)
1728 {
1729         int i;
1730         u32 val;
1731
1732         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1733                 /* Wait up to 20ms for init done. */
1734                 for (i = 0; i < 200; i++) {
1735                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1736                                 return 0;
1737                         udelay(100);
1738                 }
1739                 return -ENODEV;
1740         }
1741
1742         /* Wait for firmware initialization to complete. */
1743         for (i = 0; i < 100000; i++) {
1744                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1745                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1746                         break;
1747                 udelay(10);
1748         }
1749
1750         /* Chip might not be fitted with firmware.  Some Sun onboard
1751          * parts are configured like that.  So don't signal the timeout
1752          * of the above loop as an error, but do report the lack of
1753          * running firmware once.
1754          */
1755         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1756                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1757
1758                 netdev_info(tp->dev, "No firmware running\n");
1759         }
1760
1761         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1762                 /* The 57765 A0 needs a little more
1763                  * time to do some important work.
1764                  */
1765                 mdelay(10);
1766         }
1767
1768         return 0;
1769 }
1770
1771 static void tg3_link_report(struct tg3 *tp)
1772 {
1773         if (!netif_carrier_ok(tp->dev)) {
1774                 netif_info(tp, link, tp->dev, "Link is down\n");
1775                 tg3_ump_link_report(tp);
1776         } else if (netif_msg_link(tp)) {
1777                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1778                             (tp->link_config.active_speed == SPEED_1000 ?
1779                              1000 :
1780                              (tp->link_config.active_speed == SPEED_100 ?
1781                               100 : 10)),
1782                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1783                              "full" : "half"));
1784
1785                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1786                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1787                             "on" : "off",
1788                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1789                             "on" : "off");
1790
1791                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1792                         netdev_info(tp->dev, "EEE is %s\n",
1793                                     tp->setlpicnt ? "enabled" : "disabled");
1794
1795                 tg3_ump_link_report(tp);
1796         }
1797 }
1798
1799 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1800 {
1801         u16 miireg;
1802
1803         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1804                 miireg = ADVERTISE_1000XPAUSE;
1805         else if (flow_ctrl & FLOW_CTRL_TX)
1806                 miireg = ADVERTISE_1000XPSE_ASYM;
1807         else if (flow_ctrl & FLOW_CTRL_RX)
1808                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1809         else
1810                 miireg = 0;
1811
1812         return miireg;
1813 }
1814
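/* Resolve pause for a 1000BASE-X link from the two advertisement words,
 * following the usual symmetric/asymmetric pause rules:
 *
 *   - both ends advertise PAUSE -> pause in both directions
 *   - both ends advertise ASYM_PAUSE and only one also advertises PAUSE
 *     -> the PAUSE-advertising end receives pause frames (FLOW_CTRL_RX
 *        locally if we advertised PAUSE, FLOW_CTRL_TX if the peer did)
 *   - anything else -> no pause
 */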
1815 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1816 {
1817         u8 cap = 0;
1818
1819         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1820                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1821         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1822                 if (lcladv & ADVERTISE_1000XPAUSE)
1823                         cap = FLOW_CTRL_RX;
1824                 if (rmtadv & ADVERTISE_1000XPAUSE)
1825                         cap = FLOW_CTRL_TX;
1826         }
1827
1828         return cap;
1829 }
1830
1831 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1832 {
1833         u8 autoneg;
1834         u8 flowctrl = 0;
1835         u32 old_rx_mode = tp->rx_mode;
1836         u32 old_tx_mode = tp->tx_mode;
1837
1838         if (tg3_flag(tp, USE_PHYLIB))
1839                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1840         else
1841                 autoneg = tp->link_config.autoneg;
1842
1843         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1844                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1845                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1846                 else
1847                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1848         } else
1849                 flowctrl = tp->link_config.flowctrl;
1850
1851         tp->link_config.active_flowctrl = flowctrl;
1852
1853         if (flowctrl & FLOW_CTRL_RX)
1854                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1855         else
1856                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1857
1858         if (old_rx_mode != tp->rx_mode)
1859                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1860
1861         if (flowctrl & FLOW_CTRL_TX)
1862                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1863         else
1864                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1865
1866         if (old_tx_mode != tp->tx_mode)
1867                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1868 }
1869
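/* phylib adjust_link callback, registered via phy_connect() in
 * tg3_phy_init().  Runs whenever the PHY state machine reports a change
 * and reprograms MAC port mode, duplex, flow control and IPG timings to
 * match, emitting a link report when the state actually changed.
 */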
1870 static void tg3_adjust_link(struct net_device *dev)
1871 {
1872         u8 oldflowctrl, linkmesg = 0;
1873         u32 mac_mode, lcl_adv, rmt_adv;
1874         struct tg3 *tp = netdev_priv(dev);
1875         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1876
1877         spin_lock_bh(&tp->lock);
1878
1879         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1880                                     MAC_MODE_HALF_DUPLEX);
1881
1882         oldflowctrl = tp->link_config.active_flowctrl;
1883
1884         if (phydev->link) {
1885                 lcl_adv = 0;
1886                 rmt_adv = 0;
1887
1888                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1889                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1890                 else if (phydev->speed == SPEED_1000 ||
1891                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1892                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1893                 else
1894                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1895
1896                 if (phydev->duplex == DUPLEX_HALF)
1897                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1898                 else {
1899                         lcl_adv = mii_advertise_flowctrl(
1900                                   tp->link_config.flowctrl);
1901
1902                         if (phydev->pause)
1903                                 rmt_adv = LPA_PAUSE_CAP;
1904                         if (phydev->asym_pause)
1905                                 rmt_adv |= LPA_PAUSE_ASYM;
1906                 }
1907
1908                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1909         } else
1910                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1911
1912         if (mac_mode != tp->mac_mode) {
1913                 tp->mac_mode = mac_mode;
1914                 tw32_f(MAC_MODE, tp->mac_mode);
1915                 udelay(40);
1916         }
1917
1918         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1919                 if (phydev->speed == SPEED_10)
1920                         tw32(MAC_MI_STAT,
1921                              MAC_MI_STAT_10MBPS_MODE |
1922                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1923                 else
1924                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1925         }
1926
1927         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1928                 tw32(MAC_TX_LENGTHS,
1929                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1930                       (6 << TX_LENGTHS_IPG_SHIFT) |
1931                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1932         else
1933                 tw32(MAC_TX_LENGTHS,
1934                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1935                       (6 << TX_LENGTHS_IPG_SHIFT) |
1936                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1937
1938         if (phydev->link != tp->old_link ||
1939             phydev->speed != tp->link_config.active_speed ||
1940             phydev->duplex != tp->link_config.active_duplex ||
1941             oldflowctrl != tp->link_config.active_flowctrl)
1942                 linkmesg = 1;
1943
1944         tp->old_link = phydev->link;
1945         tp->link_config.active_speed = phydev->speed;
1946         tp->link_config.active_duplex = phydev->duplex;
1947
1948         spin_unlock_bh(&tp->lock);
1949
1950         if (linkmesg)
1951                 tg3_link_report(tp);
1952 }
1953
1954 static int tg3_phy_init(struct tg3 *tp)
1955 {
1956         struct phy_device *phydev;
1957
1958         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1959                 return 0;
1960
1961         /* Bring the PHY back to a known state. */
1962         tg3_bmcr_reset(tp);
1963
1964         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1965
1966         /* Attach the MAC to the PHY. */
1967         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1968                              phydev->dev_flags, phydev->interface);
1969         if (IS_ERR(phydev)) {
1970                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1971                 return PTR_ERR(phydev);
1972         }
1973
1974         /* Mask with MAC supported features. */
1975         switch (phydev->interface) {
1976         case PHY_INTERFACE_MODE_GMII:
1977         case PHY_INTERFACE_MODE_RGMII:
1978                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1979                         phydev->supported &= (PHY_GBIT_FEATURES |
1980                                               SUPPORTED_Pause |
1981                                               SUPPORTED_Asym_Pause);
1982                         break;
1983                 }
1984                 /* fallthru */
1985         case PHY_INTERFACE_MODE_MII:
1986                 phydev->supported &= (PHY_BASIC_FEATURES |
1987                                       SUPPORTED_Pause |
1988                                       SUPPORTED_Asym_Pause);
1989                 break;
1990         default:
1991                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1992                 return -EINVAL;
1993         }
1994
1995         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1996
1997         phydev->advertising = phydev->supported;
1998
1999         return 0;
2000 }
2001
2002 static void tg3_phy_start(struct tg3 *tp)
2003 {
2004         struct phy_device *phydev;
2005
2006         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2007                 return;
2008
2009         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2010
2011         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2012                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2013                 phydev->speed = tp->link_config.speed;
2014                 phydev->duplex = tp->link_config.duplex;
2015                 phydev->autoneg = tp->link_config.autoneg;
2016                 phydev->advertising = tp->link_config.advertising;
2017         }
2018
2019         phy_start(phydev);
2020
2021         phy_start_aneg(phydev);
2022 }
2023
2024 static void tg3_phy_stop(struct tg3 *tp)
2025 {
2026         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2027                 return;
2028
2029         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2030 }
2031
2032 static void tg3_phy_fini(struct tg3 *tp)
2033 {
2034         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2035                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2036                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2037         }
2038 }
2039
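/* Enable external loopback through the AUXCTL shadow register.
 * FET-class PHYs are skipped, and the 5401 gets a blind write since it
 * cannot do a read-modify-write cycle on this register.
 */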
2040 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2041 {
2042         int err;
2043         u32 val;
2044
2045         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2046                 return 0;
2047
2048         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2049                 /* Cannot do read-modify-write on 5401 */
2050                 err = tg3_phy_auxctl_write(tp,
2051                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2052                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2053                                            0x4c20);
2054                 goto done;
2055         }
2056
2057         err = tg3_phy_auxctl_read(tp,
2058                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2059         if (err)
2060                 return err;
2061
2062         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2063         err = tg3_phy_auxctl_write(tp,
2064                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2065
2066 done:
2067         return err;
2068 }
2069
2070 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2071 {
2072         u32 phytest;
2073
2074         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2075                 u32 phy;
2076
2077                 tg3_writephy(tp, MII_TG3_FET_TEST,
2078                              phytest | MII_TG3_FET_SHADOW_EN);
2079                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2080                         if (enable)
2081                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2082                         else
2083                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2084                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2085                 }
2086                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2087         }
2088 }
2089
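/* Auto power-down (APD) lets the PHY sleep while the link is down,
 * waking on an 84 ms timer to look for link energy.  FET-class PHYs use
 * a shadow-register toggle; others program the SCR5 and APD shadow
 * selectors directly.
 */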
2090 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2091 {
2092         u32 reg;
2093
2094         if (!tg3_flag(tp, 5705_PLUS) ||
2095             (tg3_flag(tp, 5717_PLUS) &&
2096              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2097                 return;
2098
2099         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2100                 tg3_phy_fet_toggle_apd(tp, enable);
2101                 return;
2102         }
2103
2104         reg = MII_TG3_MISC_SHDW_WREN |
2105               MII_TG3_MISC_SHDW_SCR5_SEL |
2106               MII_TG3_MISC_SHDW_SCR5_LPED |
2107               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2108               MII_TG3_MISC_SHDW_SCR5_SDTL |
2109               MII_TG3_MISC_SHDW_SCR5_C125OE;
2110         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2111                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2112
2113         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2114
2116         reg = MII_TG3_MISC_SHDW_WREN |
2117               MII_TG3_MISC_SHDW_APD_SEL |
2118               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2119         if (enable)
2120                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2121
2122         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2123 }
2124
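/* Toggle automatic MDI/MDI-X crossover detection, which lets the link
 * come up on either straight-through or crossover cabling.
 */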
2125 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2126 {
2127         u32 phy;
2128
2129         if (!tg3_flag(tp, 5705_PLUS) ||
2130             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2131                 return;
2132
2133         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2134                 u32 ephy;
2135
2136                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2137                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2138
2139                         tg3_writephy(tp, MII_TG3_FET_TEST,
2140                                      ephy | MII_TG3_FET_SHADOW_EN);
2141                         if (!tg3_readphy(tp, reg, &phy)) {
2142                                 if (enable)
2143                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2144                                 else
2145                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2146                                 tg3_writephy(tp, reg, phy);
2147                         }
2148                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2149                 }
2150         } else {
2151                 int ret;
2152
2153                 ret = tg3_phy_auxctl_read(tp,
2154                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2155                 if (!ret) {
2156                         if (enable)
2157                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2158                         else
2159                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2160                         tg3_phy_auxctl_write(tp,
2161                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2162                 }
2163         }
2164 }
2165
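/* Enable Broadcom's Ethernet@Wirespeed feature, which downshifts to a
 * lower speed instead of failing when a gigabit link cannot be
 * established (e.g. over marginal or two-pair cabling).
 */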
2166 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2167 {
2168         int ret;
2169         u32 val;
2170
2171         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2172                 return;
2173
2174         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2175         if (!ret)
2176                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2177                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2178 }
2179
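/* Transfer PHY tuning values from the chip's one-time-programmable
 * (OTP) storage, cached in tp->phy_otp, into the corresponding DSP
 * coefficient registers.
 */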
2180 static void tg3_phy_apply_otp(struct tg3 *tp)
2181 {
2182         u32 otp, phy;
2183
2184         if (!tp->phy_otp)
2185                 return;
2186
2187         otp = tp->phy_otp;
2188
2189         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2190                 return;
2191
2192         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2193         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2194         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2195
2196         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2197               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2198         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2199
2200         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2201         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2202         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2203
2204         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2205         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2206
2207         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2208         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2209
2210         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2211               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2212         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2213
2214         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2215 }
2216
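/* Energy Efficient Ethernet (802.3az) bookkeeping: on an autonegotiated
 * full-duplex 100/1000 link, program the LPI exit timer and check the
 * clause 45 EEE resolution status; otherwise make sure LPI stays
 * disabled.
 */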
2217 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2218 {
2219         u32 val;
2220
2221         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2222                 return;
2223
2224         tp->setlpicnt = 0;
2225
2226         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2227             current_link_up == 1 &&
2228             tp->link_config.active_duplex == DUPLEX_FULL &&
2229             (tp->link_config.active_speed == SPEED_100 ||
2230              tp->link_config.active_speed == SPEED_1000)) {
2231                 u32 eeectl;
2232
2233                 if (tp->link_config.active_speed == SPEED_1000)
2234                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2235                 else
2236                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2237
2238                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2239
2240                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2241                                   TG3_CL45_D7_EEERES_STAT, &val);
2242
2243                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2244                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2245                         tp->setlpicnt = 2;
2246         }
2247
2248         if (!tp->setlpicnt) {
2249                 if (current_link_up == 1 &&
2250                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2251                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2252                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2253                 }
2254
2255                 val = tr32(TG3_CPMU_EEE_MODE);
2256                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2257         }
2258 }
2259
2260 static void tg3_phy_eee_enable(struct tg3 *tp)
2261 {
2262         u32 val;
2263
2264         if (tp->link_config.active_speed == SPEED_1000 &&
2265             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2266              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2267              tg3_flag(tp, 57765_CLASS)) &&
2268             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2269                 val = MII_TG3_DSP_TAP26_ALNOKO |
2270                       MII_TG3_DSP_TAP26_RMRXSTO;
2271                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2272                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2273         }
2274
2275         val = tr32(TG3_CPMU_EEE_MODE);
2276         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2277 }
2278
2279 static int tg3_wait_macro_done(struct tg3 *tp)
2280 {
2281         int limit = 100;
2282
2283         while (limit--) {
2284                 u32 tmp32;
2285
2286                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2287                         if ((tmp32 & 0x1000) == 0)
2288                                 break;
2289                 }
2290         }
2291         if (limit < 0)
2292                 return -EBUSY;
2293
2294         return 0;
2295 }
2296
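/* Write a six-word test pattern into the DSP memory of each of the four
 * channels, read it back through the same window and compare.  A macro
 * timeout requests a fresh PHY reset via *resetp; a data mismatch just
 * fails the attempt so the caller retries.
 */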
2297 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2298 {
2299         static const u32 test_pat[4][6] = {
2300         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2301         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2302         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2303         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2304         };
2305         int chan;
2306
2307         for (chan = 0; chan < 4; chan++) {
2308                 int i;
2309
2310                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2311                              (chan * 0x2000) | 0x0200);
2312                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2313
2314                 for (i = 0; i < 6; i++)
2315                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2316                                      test_pat[chan][i]);
2317
2318                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2319                 if (tg3_wait_macro_done(tp)) {
2320                         *resetp = 1;
2321                         return -EBUSY;
2322                 }
2323
2324                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2325                              (chan * 0x2000) | 0x0200);
2326                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2327                 if (tg3_wait_macro_done(tp)) {
2328                         *resetp = 1;
2329                         return -EBUSY;
2330                 }
2331
2332                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2333                 if (tg3_wait_macro_done(tp)) {
2334                         *resetp = 1;
2335                         return -EBUSY;
2336                 }
2337
2338                 for (i = 0; i < 6; i += 2) {
2339                         u32 low, high;
2340
2341                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2342                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2343                             tg3_wait_macro_done(tp)) {
2344                                 *resetp = 1;
2345                                 return -EBUSY;
2346                         }
2347                         low &= 0x7fff;
2348                         high &= 0x000f;
2349                         if (low != test_pat[chan][i] ||
2350                             high != test_pat[chan][i+1]) {
2351                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2352                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2353                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2354
2355                                 return -EBUSY;
2356                         }
2357                 }
2358         }
2359
2360         return 0;
2361 }
2362
2363 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2364 {
2365         int chan;
2366
2367         for (chan = 0; chan < 4; chan++) {
2368                 int i;
2369
2370                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2371                              (chan * 0x2000) | 0x0200);
2372                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2373                 for (i = 0; i < 6; i++)
2374                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2375                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2376                 if (tg3_wait_macro_done(tp))
2377                         return -EBUSY;
2378         }
2379
2380         return 0;
2381 }
2382
2383 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2384 {
2385         u32 reg32, phy9_orig;
2386         int retries, do_phy_reset, err;
2387
2388         retries = 10;
2389         do_phy_reset = 1;
2390         do {
2391                 if (do_phy_reset) {
2392                         err = tg3_bmcr_reset(tp);
2393                         if (err)
2394                                 return err;
2395                         do_phy_reset = 0;
2396                 }
2397
2398                 /* Disable transmitter and interrupt.  */
2399                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2400                         continue;
2401
2402                 reg32 |= 0x3000;
2403                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2404
2405                 /* Set full-duplex, 1000 Mbps.  */
2406                 tg3_writephy(tp, MII_BMCR,
2407                              BMCR_FULLDPLX | BMCR_SPEED1000);
2408
2409                 /* Set to master mode.  */
2410                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2411                         continue;
2412
2413                 tg3_writephy(tp, MII_CTRL1000,
2414                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2415
2416                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2417                 if (err)
2418                         return err;
2419
2420                 /* Block the PHY control access.  */
2421                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2422
2423                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2424                 if (!err)
2425                         break;
2426         } while (--retries);
2427
2428         err = tg3_phy_reset_chanpat(tp);
2429         if (err)
2430                 return err;
2431
2432         tg3_phydsp_write(tp, 0x8005, 0x0000);
2433
2434         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2435         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2436
2437         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2438
2439         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2440
2441         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2442                 reg32 &= ~0x3000;
2443                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2444         } else if (!err)
2445                 err = -EBUSY;
2446
2447         return err;
2448 }
2449
2450 /* Reset the tigon3 PHY unconditionally and reapply the DSP and
2451  * errata workarounds the different chip revisions need.
2452  */
2453 static int tg3_phy_reset(struct tg3 *tp)
2454 {
2455         u32 val, cpmuctrl;
2456         int err;
2457
2458         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2459                 val = tr32(GRC_MISC_CFG);
2460                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2461                 udelay(40);
2462         }
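        /* MII_BMSR latches link-down events, so read it twice: the first
         * read flushes any stale latched status.
         */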
2463         err  = tg3_readphy(tp, MII_BMSR, &val);
2464         err |= tg3_readphy(tp, MII_BMSR, &val);
2465         if (err != 0)
2466                 return -EBUSY;
2467
2468         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2469                 netif_carrier_off(tp->dev);
2470                 tg3_link_report(tp);
2471         }
2472
2473         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2474             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2476                 err = tg3_phy_reset_5703_4_5(tp);
2477                 if (err)
2478                         return err;
2479                 goto out;
2480         }
2481
2482         cpmuctrl = 0;
2483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2484             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2485                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2486                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2487                         tw32(TG3_CPMU_CTRL,
2488                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2489         }
2490
2491         err = tg3_bmcr_reset(tp);
2492         if (err)
2493                 return err;
2494
2495         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2496                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2497                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2498
2499                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2500         }
2501
2502         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2503             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2504                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2505                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2506                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2507                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2508                         udelay(40);
2509                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2510                 }
2511         }
2512
2513         if (tg3_flag(tp, 5717_PLUS) &&
2514             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2515                 return 0;
2516
2517         tg3_phy_apply_otp(tp);
2518
2519         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2520                 tg3_phy_toggle_apd(tp, true);
2521         else
2522                 tg3_phy_toggle_apd(tp, false);
2523
2524 out:
2525         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2526             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2527                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2528                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2529                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2530         }
2531
2532         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2533                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2534                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2535         }
2536
2537         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2538                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2539                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2540                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2541                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2542                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2543                 }
2544         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2545                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2546                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2547                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2548                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2549                                 tg3_writephy(tp, MII_TG3_TEST1,
2550                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2551                         } else
2552                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2553
2554                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2555                 }
2556         }
2557
2558         /* Set the extended packet length bit (bit 14) on all
2559          * chips that support jumbo frames. */
2560         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2561                 /* Cannot do read-modify-write on 5401 */
2562                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2563         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2564                 /* Set bit 14 with read-modify-write to preserve other bits */
2565                 err = tg3_phy_auxctl_read(tp,
2566                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2567                 if (!err)
2568                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2569                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2570         }
2571
2572         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2573          * jumbo frames transmission.
2574          */
2575         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2576                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2577                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2578                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2579         }
2580
2581         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2582                 /* adjust output voltage */
2583                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2584         }
2585
2586         tg3_phy_toggle_automdix(tp, 1);
2587         tg3_phy_set_wirespeed(tp);
2588         return 0;
2589 }
2590
2591 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2592 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2593 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2594                                           TG3_GPIO_MSG_NEED_VAUX)
2595 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2596         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2597          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2598          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2599          (TG3_GPIO_MSG_DRVR_PRES << 12))
2600
2601 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2602         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2603          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2604          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2605          (TG3_GPIO_MSG_NEED_VAUX << 12))
2606
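/* Each PCI function owns a 4-bit slice of the shared GPIO message word,
 * located 4 * pci_fn bits above TG3_APE_GPIO_MSG_SHIFT (function 2, for
 * example, occupies bits 8-11 of the message field).  Only this
 * function's slice is rewritten; the combined field is returned so the
 * caller can inspect every function's status.
 */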
2607 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2608 {
2609         u32 status, shift;
2610
2611         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2612             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2613                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2614         else
2615                 status = tr32(TG3_CPMU_DRV_STATUS);
2616
2617         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2618         status &= ~(TG3_GPIO_MSG_MASK << shift);
2619         status |= (newstat << shift);
2620
2621         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2622             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2623                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2624         else
2625                 tw32(TG3_CPMU_DRV_STATUS, status);
2626
2627         return status >> TG3_APE_GPIO_MSG_SHIFT;
2628 }
2629
2630 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2631 {
2632         if (!tg3_flag(tp, IS_NIC))
2633                 return 0;
2634
2635         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2636             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2637             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2638                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2639                         return -EIO;
2640
2641                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2642
2643                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2644                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2645
2646                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2647         } else {
2648                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2649                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2650         }
2651
2652         return 0;
2653 }
2654
2655 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2656 {
2657         u32 grc_local_ctrl;
2658
2659         if (!tg3_flag(tp, IS_NIC) ||
2660             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2662                 return;
2663
2664         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2665
2666         tw32_wait_f(GRC_LOCAL_CTRL,
2667                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2668                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2669
2670         tw32_wait_f(GRC_LOCAL_CTRL,
2671                     grc_local_ctrl,
2672                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2673
2674         tw32_wait_f(GRC_LOCAL_CTRL,
2675                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2676                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2677 }
2678
2679 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2680 {
2681         if (!tg3_flag(tp, IS_NIC))
2682                 return;
2683
2684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2685             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2686                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2687                             (GRC_LCLCTRL_GPIO_OE0 |
2688                              GRC_LCLCTRL_GPIO_OE1 |
2689                              GRC_LCLCTRL_GPIO_OE2 |
2690                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2691                              GRC_LCLCTRL_GPIO_OUTPUT1),
2692                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2693         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2694                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2695                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2696                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2697                                      GRC_LCLCTRL_GPIO_OE1 |
2698                                      GRC_LCLCTRL_GPIO_OE2 |
2699                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2700                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2701                                      tp->grc_local_ctrl;
2702                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2703                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2704
2705                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2706                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2707                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2708
2709                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2710                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2711                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2712         } else {
2713                 u32 no_gpio2;
2714                 u32 grc_local_ctrl = 0;
2715
2716                 /* Workaround to keep the chip from drawing excess current. */
2717                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2718                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2719                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2720                                     grc_local_ctrl,
2721                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2722                 }
2723
2724                 /* On 5753 and variants, GPIO2 cannot be used. */
2725                 no_gpio2 = tp->nic_sram_data_cfg &
2726                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2727
2728                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2729                                   GRC_LCLCTRL_GPIO_OE1 |
2730                                   GRC_LCLCTRL_GPIO_OE2 |
2731                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2732                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2733                 if (no_gpio2) {
2734                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2735                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2736                 }
2737                 tw32_wait_f(GRC_LOCAL_CTRL,
2738                             tp->grc_local_ctrl | grc_local_ctrl,
2739                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2740
2741                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2742
2743                 tw32_wait_f(GRC_LOCAL_CTRL,
2744                             tp->grc_local_ctrl | grc_local_ctrl,
2745                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2746
2747                 if (!no_gpio2) {
2748                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2749                         tw32_wait_f(GRC_LOCAL_CTRL,
2750                                     tp->grc_local_ctrl | grc_local_ctrl,
2751                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2752                 }
2753         }
2754 }
2755
2756 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2757 {
2758         u32 msg = 0;
2759
2760         /* Serialize power state transitions */
2761         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2762                 return;
2763
2764         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2765                 msg = TG3_GPIO_MSG_NEED_VAUX;
2766
2767         msg = tg3_set_function_status(tp, msg);
2768
2769         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2770                 goto done;
2771
2772         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2773                 tg3_pwrsrc_switch_to_vaux(tp);
2774         else
2775                 tg3_pwrsrc_die_with_vmain(tp);
2776
2777 done:
2778         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2779 }
2780
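/* Decide whether this device needs to run from auxiliary power (Vaux),
 * which it does when ASF management or Wake-on-LAN must stay alive, or
 * whether it can drop back to Vmain.  On dual-port chips the peer
 * function's requirements are honored as well.
 */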
2781 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2782 {
2783         bool need_vaux = false;
2784
2785         /* The GPIOs do something completely different on 57765. */
2786         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2787                 return;
2788
2789         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2790             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2792                 tg3_frob_aux_power_5717(tp, include_wol ?
2793                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2794                 return;
2795         }
2796
2797         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2798                 struct net_device *dev_peer;
2799
2800                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2801
2802                 /* remove_one() may have been run on the peer. */
2803                 if (dev_peer) {
2804                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2805
2806                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2807                                 return;
2808
2809                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2810                             tg3_flag(tp_peer, ENABLE_ASF))
2811                                 need_vaux = true;
2812                 }
2813         }
2814
2815         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2816             tg3_flag(tp, ENABLE_ASF))
2817                 need_vaux = true;
2818
2819         if (need_vaux)
2820                 tg3_pwrsrc_switch_to_vaux(tp);
2821         else
2822                 tg3_pwrsrc_die_with_vmain(tp);
2823 }
2824
2825 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2826 {
2827         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
                return 1;
        else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
                if (speed != SPEED_10)
                        return 1;
        } else if (speed == SPEED_10)
                return 1;

        return 0;
}

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
        u32 val;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                u32 phytest;
                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                        u32 phy;

                        tg3_writephy(tp, MII_ADVERTISE, 0);
                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);

                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     phytest | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
                                phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
                                tg3_writephy(tp,
                                             MII_TG3_FET_SHDW_AUXMODE4,
                                             phy);
                        }
                        tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
                }
                return;
        } else if (do_low_power) {
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);

                val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
                      MII_TG3_AUXCTL_PCTL_VREG_11V;
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
             (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
             !tp->pci_fn))
                return;

        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

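/* NVRAM is shared with the on-chip bootcode, so access is arbitrated
 * through the SWARB hardware semaphore: the firmware uses request/grant
 * pair 0 (see the SWARB_REQ_CLR0 cleanup in tg3_halt_cpu()) and the
 * driver pair 1.  nvram_lock_cnt lets driver code nest lock/unlock pairs.
 */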
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
        if (tg3_flag(tp, NVRAM)) {
                int i;

                if (tp->nvram_lock_cnt == 0) {
                        tw32(NVRAM_SWARB, SWARB_REQ_SET1);
                        for (i = 0; i < 8000; i++) {
                                if (tr32(NVRAM_SWARB) & SWARB_GNT1)
                                        break;
                                udelay(20);
                        }
                        if (i == 8000) {
                                tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
                                return -ENODEV;
                        }
                }
                tp->nvram_lock_cnt++;
        }
        return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
        if (tg3_flag(tp, NVRAM)) {
                if (tp->nvram_lock_cnt > 0)
                        tp->nvram_lock_cnt--;
                if (tp->nvram_lock_cnt == 0)
                        tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
        }
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
        if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
                u32 nvaccess = tr32(NVRAM_ACCESS);

                tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
        }
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
        if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
                u32 nvaccess = tr32(NVRAM_ACCESS);

                tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
        }
}

static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
        u32 tmp;
        int i;

        if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
                return -EINVAL;

        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                msleep(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        tmp = tr32(GRC_EEPROM_DATA);

        /*
         * The data will always be opposite the native endian
         * format.  Perform a blind byteswap to compensate.
         */
        *val = swab32(tmp);

        return 0;
}

#define NVRAM_CMD_TIMEOUT 10000
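
/* The polling loop in tg3_nvram_exec_cmd() below waits 10 usec per
 * iteration, so this bounds a single NVRAM command at roughly 100 ms.
 */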

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
        int i;

        tw32(NVRAM_CMD, nvram_cmd);
        for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
                udelay(10);
                if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
                        udelay(10);
                        break;
                }
        }

        if (i == NVRAM_CMD_TIMEOUT)
                return -EBUSY;

        return 0;
}

static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
        if (tg3_flag(tp, NVRAM) &&
            tg3_flag(tp, NVRAM_BUFFERED) &&
            tg3_flag(tp, FLASH) &&
            !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
            (tp->nvram_jedecnum == JEDEC_ATMEL))

                addr = ((addr / tp->nvram_pagesize) <<
                        ATMEL_AT45DB0X1B_PAGE_POS) +
                       (addr % tp->nvram_pagesize);

        return addr;
}
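
/* Worked example, assuming the AT45DB0X1B's 264-byte pages with
 * ATMEL_AT45DB0X1B_PAGE_POS == 9: linear address 530 sits at page
 * 530 / 264 = 2, byte 530 % 264 = 2, so the physical address becomes
 * (2 << 9) + 2 = 0x402.  tg3_nvram_logical_addr() below is the inverse.
 */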

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
        if (tg3_flag(tp, NVRAM) &&
            tg3_flag(tp, NVRAM_BUFFERED) &&
            tg3_flag(tp, FLASH) &&
            !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
            (tp->nvram_jedecnum == JEDEC_ATMEL))

                addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
                        tp->nvram_pagesize) +
                       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

        return addr;
}

/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
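/* Example of the convention above: the NVRAM bytes 0x12 0x34 0x56 0x78
 * come back from tg3_nvram_read() as the integer 0x12345678 on either
 * host, so only on a BE machine does storing that integer reproduce the
 * NVRAM byte order.  tg3_nvram_read_be32() below converts the result
 * with cpu_to_be32() so the in-memory bytes match NVRAM on BE and LE.
 */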
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret;

        if (!tg3_flag(tp, NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        if (ret == 0)
                *val = tr32(NVRAM_RDDATA);

        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
        u32 v;
        int res = tg3_nvram_read(tp, offset, &v);
        if (!res)
                *val = cpu_to_be32(v);
        return res;
}

static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr;
                __be32 data;

                addr = offset + i;

                memcpy(&data, buf + i, 4);

                /*
                 * The SEEPROM interface expects the data to always be opposite
                 * the native endian format.  We accomplish this by reversing
                 * all the operations that would have been performed on the
                 * data from a call to tg3_nvram_read_be32().
                 */
                tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

                val = tr32(GRC_EEPROM_ADDR);
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}

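/* Unbuffered flash parts cannot be modified in place: each affected page
 * is read into a bounce buffer, patched there, erased in the device, and
 * then rewritten word by word (a read-modify-write cycle).
 */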
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int ret = 0;
        u32 pagesize = tp->nvram_pagesize;
        u32 pagemask = pagesize - 1;
        u32 nvram_cmd;
        u8 *tmp;

        tmp = kmalloc(pagesize, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;

        while (len) {
                int j;
                u32 phy_addr, page_off, size;

                phy_addr = offset & ~pagemask;

                for (j = 0; j < pagesize; j += 4) {
                        ret = tg3_nvram_read_be32(tp, phy_addr + j,
                                                  (__be32 *) (tmp + j));
                        if (ret)
                                break;
                }
                if (ret)
                        break;

                page_off = offset & pagemask;
                size = pagesize;
                if (len < size)
                        size = len;

                len -= size;

                memcpy(tmp + page_off, buf, size);

                offset = offset + (pagesize - page_off);

                tg3_enable_nvram_access(tp);

                /*
                 * Before we can erase the flash page, we need
                 * to issue a special "write enable" command.
                 */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Erase the target page */
                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
                        NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Issue another write enable to start the write. */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                for (j = 0; j < pagesize; j += 4) {
                        __be32 data;

                        data = *((__be32 *) (tmp + j));

                        tw32(NVRAM_WRDATA, be32_to_cpu(data));

                        tw32(NVRAM_ADDR, phy_addr + j);

                        nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
                                NVRAM_CMD_WR;

                        if (j == 0)
                                nvram_cmd |= NVRAM_CMD_FIRST;
                        else if (j == (pagesize - 4))
                                nvram_cmd |= NVRAM_CMD_LAST;

                        ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
                        if (ret)
                                break;
                }
                if (ret)
                        break;
        }

        nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
        tg3_nvram_exec_cmd(tp, nvram_cmd);

        kfree(tmp);

        return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int i, ret = 0;

        for (i = 0; i < len; i += 4, offset += 4) {
                u32 page_off, phy_addr, nvram_cmd;
                __be32 data;

                memcpy(&data, buf + i, 4);
                tw32(NVRAM_WRDATA, be32_to_cpu(data));

                page_off = offset % tp->nvram_pagesize;

                phy_addr = tg3_nvram_phys_addr(tp, offset);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

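                /* NVRAM_CMD_FIRST and NVRAM_CMD_LAST bracket a programming
                 * burst: a burst begins at a page boundary (or at the first
                 * word of the transfer) and ends at the last word of the
                 * page or of the data, whichever comes first.
                 */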
                if (page_off == 0 || i == 0)
                        nvram_cmd |= NVRAM_CMD_FIRST;
                if (page_off == (tp->nvram_pagesize - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                if (i == (len - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                if ((nvram_cmd & NVRAM_CMD_FIRST) ||
                    !tg3_flag(tp, FLASH) ||
                    !tg3_flag(tp, 57765_PLUS))
                        tw32(NVRAM_ADDR, phy_addr);

                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
                    !tg3_flag(tp, 5755_PLUS) &&
                    (tp->nvram_jedecnum == JEDEC_ST) &&
                    (nvram_cmd & NVRAM_CMD_FIRST)) {
                        u32 cmd;

                        cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
                        ret = tg3_nvram_exec_cmd(tp, cmd);
                        if (ret)
                                break;
                }
                if (!tg3_flag(tp, FLASH)) {
                        /* We always do complete word writes to eeprom. */
                        nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
                }

                ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
                if (ret)
                        break;
        }
        return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
        int ret;

        if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
                       ~GRC_LCLCTRL_GPIO_OUTPUT1);
                udelay(40);
        }

        if (!tg3_flag(tp, NVRAM)) {
                ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
        } else {
                u32 grc_mode;

                ret = tg3_nvram_lock(tp);
                if (ret)
                        return ret;

                tg3_enable_nvram_access(tp);
                if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
                        tw32(NVRAM_WRITE1, 0x406);

                grc_mode = tr32(GRC_MODE);
                tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

                if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
                        ret = tg3_nvram_write_block_buffered(tp, offset, len,
                                buf);
                } else {
                        ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
                                buf);
                }

                grc_mode = tr32(GRC_MODE);
                tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

                tg3_disable_nvram_access(tp);
                tg3_nvram_unlock(tp);
        }

        if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
                udelay(40);
        }

        return ret;
}

#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
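
/* Each internal CPU core has a 16 kB (0x4000) scratchpad window in NIC
 * SRAM; firmware images are staged there before the CPU is released
 * from halt.
 */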

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
        int i;

        BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val = tr32(GRC_VCPU_EXT_CTRL);

                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
                return 0;
        }
        if (offset == RX_CPU_BASE) {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
                        if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                                break;
                }

                tw32(offset + CPU_STATE, 0xffffffff);
                tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
                udelay(10);
        } else {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
                        if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                                break;
                }
        }

        if (i >= 10000) {
                netdev_err(tp->dev, "%s timed out, %s CPU\n",
                           __func__, offset == RX_CPU_BASE ? "RX" : "TX");
                return -ENODEV;
        }

        /* Clear firmware's nvram arbitration. */
        if (tg3_flag(tp, NVRAM))
                tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
        return 0;
}

struct fw_info {
        unsigned int fw_base;
        unsigned int fw_len;
        const __be32 *fw_data;
};

/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
                                 u32 cpu_scratch_base, int cpu_scratch_size,
                                 struct fw_info *info)
{
        int err, lock_err, i;
        void (*write_op)(struct tg3 *, u32, u32);

        if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
                netdev_err(tp->dev,
                           "%s: Trying to load TX cpu firmware on a 5705 or later chip\n",
                           __func__);
                return -EINVAL;
        }

        if (tg3_flag(tp, 5705_PLUS))
                write_op = tg3_write_mem;
        else
                write_op = tg3_write_indirect_reg32;

        /* It is possible that bootcode is still loading at this point.
         * Get the nvram lock first before halting the cpu.
         */
        lock_err = tg3_nvram_lock(tp);
        err = tg3_halt_cpu(tp, cpu_base);
        if (!lock_err)
                tg3_nvram_unlock(tp);
        if (err)
                goto out;

        for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
                write_op(tp, cpu_scratch_base + i, 0);
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
        for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
                write_op(tp, (cpu_scratch_base +
                              (info->fw_base & 0xffff) +
                              (i * sizeof(u32))),
                              be32_to_cpu(info->fw_data[i]));

        err = 0;

out:
        return err;
}

/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
        struct fw_info info;
        const __be32 *fw_data;
        int err, i;

        fw_data = (void *)tp->fw->data;

        /* The firmware blob starts with version numbers, followed by
           the start address and length.  The length here is the complete
           image size: length = end_address_of_bss - start_address_of_text.
           The remainder is the blob itself, loaded contiguously from the
           start address. */

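        /* Concretely, per the note above: fw_data[0] holds the version,
         * fw_data[1] the start address, and fw_data[2] the stated length;
         * the 12-byte header is skipped when the image is loaded below.
         */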
        info.fw_base = be32_to_cpu(fw_data[1]);
        info.fw_len = tp->fw->size - 12;
        info.fw_data = &fw_data[3];

        err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
                                    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
                                    &info);
        if (err)
                return err;

        err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
                                    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
                                    &info);
        if (err)
                return err;

        /* Now startup only the RX cpu. */
        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

        for (i = 0; i < 5; i++) {
                if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
                        break;
                tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
                tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
                udelay(1000);
        }
        if (i >= 5) {
                netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
                           "should be %08x\n", __func__,
                           tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
                return -ENODEV;
        }
        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

        return 0;
}

/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
        struct fw_info info;
        const __be32 *fw_data;
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err, i;

        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3))
                return 0;

        fw_data = (void *)tp->fw->data;

        /* The firmware blob starts with version numbers, followed by
           the start address and length.  The length here is the complete
           image size: length = end_address_of_bss - start_address_of_text.
           The remainder is the blob itself, loaded contiguously from the
           start address. */

        info.fw_base = be32_to_cpu(fw_data[1]);
        cpu_scratch_size = tp->fw_len;
        info.fw_len = tp->fw->size - 12;
        info.fw_data = &fw_data[3];

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                cpu_base = RX_CPU_BASE;
                cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
        } else {
                cpu_base = TX_CPU_BASE;
                cpu_scratch_base = TX_CPU_SCRATCH_BASE;
                cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
        }

        err = tg3_load_firmware_cpu(tp, cpu_base,
                                    cpu_scratch_base, cpu_scratch_size,
                                    &info);
        if (err)
                return err;

        /* Now startup the cpu. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_PC, info.fw_base);

        for (i = 0; i < 5; i++) {
                if (tr32(cpu_base + CPU_PC) == info.fw_base)
                        break;
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(cpu_base + CPU_PC, info.fw_base);
                udelay(1000);
        }
        if (i >= 5) {
                netdev_err(tp->dev,
                           "%s failed to set CPU PC: is %08x, should be %08x\n",
                           __func__, tr32(cpu_base + CPU_PC), info.fw_base);
                return -ENODEV;
        }
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_MODE,  0x00000000);
        return 0;
}


/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
        u32 addr_high, addr_low;
        int i;

        addr_high = ((tp->dev->dev_addr[0] << 8) |
                     tp->dev->dev_addr[1]);
        addr_low = ((tp->dev->dev_addr[2] << 24) |
                    (tp->dev->dev_addr[3] << 16) |
                    (tp->dev->dev_addr[4] <<  8) |
                    (tp->dev->dev_addr[5] <<  0));
        for (i = 0; i < 4; i++) {
                if (i == 1 && skip_mac_1)
                        continue;
                tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
                tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                for (i = 0; i < 12; i++) {
                        tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
                        tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
                }
        }

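        /* Seed the hardware transmit backoff generator with the (masked)
         * sum of the MAC address bytes so that NICs sharing a segment tend
         * to choose different backoff slots.
         */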
        addr_high = (tp->dev->dev_addr[0] +
                     tp->dev->dev_addr[1] +
                     tp->dev->dev_addr[2] +
                     tp->dev->dev_addr[3] +
                     tp->dev->dev_addr[4] +
                     tp->dev->dev_addr[5]) &
                TX_BACKOFF_SEED_MASK;
        tw32(MAC_TX_BACKOFF_SEED, addr_high);
}

static void tg3_enable_register_access(struct tg3 *tp)
{
        /*
         * Make sure register accesses (indirect or otherwise) will function
         * correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
        int err;

        tg3_enable_register_access(tp);

        err = pci_set_power_state(tp->pdev, PCI_D0);
        if (!err) {
                /* Switch out of Vaux if it is a NIC */
                tg3_pwrsrc_switch_to_vmain(tp);
        } else {
                netdev_err(tp->dev, "Transition to D0 failed\n");
        }

        return err;
}

static int tg3_setup_phy(struct tg3 *, int);

static int tg3_power_down_prepare(struct tg3 *tp)
{
        u32 misc_host_ctrl;
        bool device_should_wake, do_low_power;

        tg3_enable_register_access(tp);

        /* Restore the CLKREQ setting. */
        if (tg3_flag(tp, CLKREQ_BUG))
                pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
                                         PCI_EXP_LNKCTL_CLKREQ_EN);

        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
                             tg3_flag(tp, WOL_ENABLE);

        if (tg3_flag(tp, USE_PHYLIB)) {
                do_low_power = false;
                if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
                    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                        struct phy_device *phydev;
                        u32 phyid, advertising;

                        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

                        tp->link_config.speed = phydev->speed;
                        tp->link_config.duplex = phydev->duplex;
                        tp->link_config.autoneg = phydev->autoneg;
                        tp->link_config.advertising = phydev->advertising;

                        advertising = ADVERTISED_TP |
                                      ADVERTISED_Pause |
                                      ADVERTISED_Autoneg |
                                      ADVERTISED_10baseT_Half;

                        if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
                                if (tg3_flag(tp, WOL_SPEED_100MB))
                                        advertising |=
                                                ADVERTISED_100baseT_Half |
                                                ADVERTISED_100baseT_Full |
                                                ADVERTISED_10baseT_Full;
                                else
                                        advertising |= ADVERTISED_10baseT_Full;
                        }

                        phydev->advertising = advertising;

                        phy_start_aneg(phydev);

                        phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
                        if (phyid != PHY_ID_BCMAC131) {
                                phyid &= PHY_BCM_OUI_MASK;
                                if (phyid == PHY_BCM_OUI_1 ||
                                    phyid == PHY_BCM_OUI_2 ||
                                    phyid == PHY_BCM_OUI_3)
                                        do_low_power = true;
                        }
                }
        } else {
                do_low_power = true;

                if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                        tg3_setup_phy(tp, 0);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_VCPU_EXT_CTRL);
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
        } else if (!tg3_flag(tp, ENABLE_ASF)) {
                int i;
                u32 val;

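                /* No ASF agent: give the bootcode up to ~200 ms to post its
                 * completion magic in the ASF status mailbox before we
                 * continue powering down.
                 */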
                for (i = 0; i < 200; i++) {
                        tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
                        if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                                break;
                        msleep(1);
                }
        }
        if (tg3_flag(tp, WOL_CAP))
                tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
                                                     WOL_DRV_STATE_SHUTDOWN |
                                                     WOL_DRV_WOL |
                                                     WOL_SET_MAGIC_PKT);

        if (device_should_wake) {
                u32 mac_mode;

                if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                        if (do_low_power &&
                            !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                                tg3_phy_auxctl_write(tp,
                                               MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
                                               MII_TG3_AUXCTL_PCTL_WOL_EN |
                                               MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                                               MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
                                udelay(40);
                        }

                        if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                                mac_mode = MAC_MODE_PORT_MODE_GMII;
                        else
                                mac_mode = MAC_MODE_PORT_MODE_MII;

                        mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5700) {
                                u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
                                             SPEED_100 : SPEED_10;
                                if (tg3_5700_link_polarity(tp, speed))
                                        mac_mode |= MAC_MODE_LINK_POLARITY;
                                else
                                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        }
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!tg3_flag(tp, 5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
                if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
                    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
                        mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

                if (tg3_flag(tp, ENABLE_APE))
                        mac_mode |= MAC_MODE_APE_TX_EN |
                                    MAC_MODE_APE_RX_EN |
                                    MAC_MODE_TDE_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        if (!tg3_flag(tp, WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
                            CLOCK_CTRL_PWRDOWN_PLL133, 40);
        } else if (tg3_flag(tp, 5780_CLASS) ||
                   tg3_flag(tp, CPMU_PRESENT) ||
                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* do nothing */
        } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tg3_flag(tp, 5705_PLUS)) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
                            40);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
                            40);

                if (!tg3_flag(tp, 5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    tp->pci_clock_ctrl | newbits3, 40);
                }
        }

        if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
                tg3_power_down_phy(tp, do_low_power);

        tg3_frob_aux_power(tp, true);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!tg3_flag(tp, ENABLE_ASF)) {
                        int err;

                        err = tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
                        if (!err)
                                tg3_nvram_unlock(tp);
                }
        }

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        return 0;
}

static void tg3_power_down(struct tg3 *tp)
{
        tg3_power_down_prepare(tp);

        pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
        pci_set_power_state(tp->pdev, PCI_D3hot);
}

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
        switch (val & MII_TG3_AUX_STAT_SPDMASK) {
        case MII_TG3_AUX_STAT_10HALF:
                *speed = SPEED_10;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_10FULL:
                *speed = SPEED_10;
                *duplex = DUPLEX_FULL;
                break;

        case MII_TG3_AUX_STAT_100HALF:
                *speed = SPEED_100;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_100FULL:
                *speed = SPEED_100;
                *duplex = DUPLEX_FULL;
                break;

        case MII_TG3_AUX_STAT_1000HALF:
                *speed = SPEED_1000;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_1000FULL:
                *speed = SPEED_1000;
                *duplex = DUPLEX_FULL;
                break;

        default:
                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
                                 SPEED_10;
                        *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
                                  DUPLEX_HALF;
                        break;
                }
                *speed = SPEED_UNKNOWN;
                *duplex = DUPLEX_UNKNOWN;
                break;
        }
}

static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
        int err = 0;
        u32 val, new_adv;

        new_adv = ADVERTISE_CSMA;
        new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
        new_adv |= mii_advertise_flowctrl(flowctrl);

        err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
        if (err)
                goto done;

        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

                if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                        new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

                err = tg3_writephy(tp, MII_CTRL1000, new_adv);
                if (err)
                        goto done;
        }

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                goto done;

        tw32(TG3_CPMU_EEE_MODE,
             tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

        err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
        if (!err) {
                u32 err2;

                val = 0;
                /* Advertise 100-BaseTX EEE ability */
                if (advertise & ADVERTISED_100baseT_Full)
                        val |= MDIO_AN_EEE_ADV_100TX;
                /* Advertise 1000-BaseT EEE ability */
                if (advertise & ADVERTISED_1000baseT_Full)
                        val |= MDIO_AN_EEE_ADV_1000T;
                err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
                if (err)
                        val = 0;

                switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
                case ASIC_REV_5717:
                case ASIC_REV_57765:
                case ASIC_REV_57766:
                case ASIC_REV_5719:
                        /* If we advertised any EEE abilities above... */
                        if (val)
                                val = MII_TG3_DSP_TAP26_ALNOKO |
                                      MII_TG3_DSP_TAP26_RMRXSTO |
                                      MII_TG3_DSP_TAP26_OPCSINPT;
                        tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
                        /* Fall through */
                case ASIC_REV_5720:
                        if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
                                tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
                                                 MII_TG3_DSP_CH34TP2_HIBW01);
                }

                err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
                if (!err)
                        err = err2;
        }

done:
        return err;
}

static void tg3_phy_copper_begin(struct tg3 *tp)
{
        if (tp->link_config.autoneg == AUTONEG_ENABLE ||
            (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                u32 adv, fc;

                if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                        adv = ADVERTISED_10baseT_Half |
                              ADVERTISED_10baseT_Full;
                        if (tg3_flag(tp, WOL_SPEED_100MB))
                                adv |= ADVERTISED_100baseT_Half |
                                       ADVERTISED_100baseT_Full;

                        fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
                } else {
                        adv = tp->link_config.advertising;
                        if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                                adv &= ~(ADVERTISED_1000baseT_Half |
                                         ADVERTISED_1000baseT_Full);

                        fc = tp->link_config.flowctrl;
                }

                tg3_phy_autoneg_cfg(tp, adv, fc);

                tg3_writephy(tp, MII_BMCR,
                             BMCR_ANENABLE | BMCR_ANRESTART);
        } else {
                int i;
                u32 bmcr, orig_bmcr;

                tp->link_config.active_speed = tp->link_config.speed;
                tp->link_config.active_duplex = tp->link_config.duplex;

                bmcr = 0;
                switch (tp->link_config.speed) {
                default:
                case SPEED_10:
                        break;

                case SPEED_100:
                        bmcr |= BMCR_SPEED100;
                        break;

                case SPEED_1000:
                        bmcr |= BMCR_SPEED1000;
                        break;
                }

                if (tp->link_config.duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;

                if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
                    (bmcr != orig_bmcr)) {
                        tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
                        for (i = 0; i < 1500; i++) {
                                u32 tmp;

                                udelay(10);
                                if (tg3_readphy(tp, MII_BMSR, &tmp) ||
                                    tg3_readphy(tp, MII_BMSR, &tmp))
                                        continue;
                                if (!(tmp & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }
                        tg3_writephy(tp, MII_BMCR, bmcr);
                        udelay(40);
                }
        }
}

static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
        int err;

        /* Turn off tap power management. */
        /* Set Extended packet length bit */
        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

        err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
        err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
        err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
        err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
        err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

        udelay(40);

        return err;
}

static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
        u32 advmsk, tgtadv, advertising;

        advertising = tp->link_config.advertising;
        tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

        advmsk = ADVERTISE_ALL;
        if (tp->link_config.active_duplex == DUPLEX_FULL) {
                tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
                advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        }

        if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
                return false;

        if ((*lcladv & advmsk) != tgtadv)
                return false;

        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                u32 tg3_ctrl;

                tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

                if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
                        return false;

                if (tgtadv &&
                    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
                        tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
                        tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
                                     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
                } else {
                        tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
                }

                if (tg3_ctrl != tgtadv)
                        return false;
        }

        return true;
}

static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
        u32 lpeth = 0;

        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                u32 val;

                if (tg3_readphy(tp, MII_STAT1000, &val))
                        return false;

                lpeth = mii_stat1000_to_ethtool_lpa_t(val);
        }

        if (tg3_readphy(tp, MII_LPA, rmtadv))
                return false;

        lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
        tp->link_config.rmt_adv = lpeth;

        return true;
}

static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up;
        u32 bmsr, val;
        u32 lcl_adv, rmt_adv;
        u16 current_speed;
        u8 current_duplex;
        int i, err;

        tw32(MAC_EVENT, 0);

        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

        /* Some third-party PHYs need to be reset when the link goes down. */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
            netif_carrier_ok(tp->dev)) {
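                /* BMSR latches link failures, so read it twice; the second
                 * read reflects the current link state.
                 */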
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    !(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
                tg3_phy_reset(tp);

        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
                    !tg3_flag(tp, INIT_COMPLETE))
                        bmsr = 0;

                if (!(bmsr & BMSR_LSTATUS)) {
                        err = tg3_init_5401phy_dsp(tp);
                        if (err)
                                return err;

                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
                                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                                    (bmsr & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }

                        if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
                            TG3_PHY_REV_BCM5401_B0 &&
                            !(bmsr & BMSR_LSTATUS) &&
                            tp->link_config.active_speed == SPEED_1000) {
                                err = tg3_phy_reset(tp);
                                if (!err)
                                        err = tg3_init_5401phy_dsp(tp);
                                if (err)
                                        return err;
                        }
                }
        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
        }

        /* Clear pending interrupts... */
        tg3_readphy(tp, MII_TG3_ISTAT, &val);
        tg3_readphy(tp, MII_TG3_ISTAT, &val);

        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                else
                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
        }

        current_link_up = 0;
        current_speed = SPEED_UNKNOWN;
        current_duplex = DUPLEX_UNKNOWN;
        tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
        tp->link_config.rmt_adv = 0;

        if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                          &val);
                if (!err && !(val & (1 << 10))) {
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                             val | (1 << 10));
                        goto relink;
                }
        }

        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                            aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                lcl_adv = 0;
                rmt_adv = 0;

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        if ((bmcr & BMCR_ANENABLE) &&
                            tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
                            tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
                                current_link_up = 1;
                } else {
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex &&
                            tp->link_config.flowctrl ==
                            tp->link_config.active_flowctrl) {
                                current_link_up = 1;
                        }
                }

                if (current_link_up == 1 &&
                    tp->link_config.active_duplex == DUPLEX_FULL) {
                        u32 reg, bit;

                        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                                reg = MII_TG3_FET_GEN_STAT;
                                bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
                        } else {
                                reg = MII_TG3_EXT_STAT;
                                bit = MII_TG3_EXT_STAT_MDIX;
                        }

                        if (!tg3_readphy(tp, reg, &val) && (val & bit))
                                tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

                        tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
                }
        }

relink:
        if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                tg3_phy_copper_begin(tp);

                tg3_readphy(tp, MII_BMSR, &bmsr);
                if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
                    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
                        current_link_up = 1;
        }

        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
        if (current_link_up == 1) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
        else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                if (current_link_up == 1 &&
                    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
                else
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }

        /* ??? Without this setting Netgear GA302T PHY does not
         * ??? send/receive packets...
         */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
            tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tg3_phy_eee_adjust(tp, current_link_up);

        if (tg3_flag(tp, USE_LINKCHG_REG)) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        }
        udelay(40);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4413             current_link_up == 1 &&
4414             tp->link_config.active_speed == SPEED_1000 &&
4415             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4416                 udelay(120);
4417                 tw32_f(MAC_STATUS,
4418                      (MAC_STATUS_SYNC_CHANGED |
4419                       MAC_STATUS_CFG_CHANGED));
4420                 udelay(40);
4421                 tg3_write_mem(tp,
4422                               NIC_SRAM_FIRMWARE_MBOX,
4423                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4424         }
4425
4426         /* Prevent send BD corruption: keep CLKREQ disabled at 10/100 speeds. */
4427         if (tg3_flag(tp, CLKREQ_BUG)) {
4428                 if (tp->link_config.active_speed == SPEED_100 ||
4429                     tp->link_config.active_speed == SPEED_10)
4430                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4431                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4432                 else
4433                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4434                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4435         }
4436
4437         if (current_link_up != netif_carrier_ok(tp->dev)) {
4438                 if (current_link_up)
4439                         netif_carrier_on(tp->dev);
4440                 else
4441                         netif_carrier_off(tp->dev);
4442                 tg3_link_report(tp);
4443         }
4444
4445         return 0;
4446 }
4447
4448 struct tg3_fiber_aneginfo {
4449         int state;
4450 #define ANEG_STATE_UNKNOWN              0
4451 #define ANEG_STATE_AN_ENABLE            1
4452 #define ANEG_STATE_RESTART_INIT         2
4453 #define ANEG_STATE_RESTART              3
4454 #define ANEG_STATE_DISABLE_LINK_OK      4
4455 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4456 #define ANEG_STATE_ABILITY_DETECT       6
4457 #define ANEG_STATE_ACK_DETECT_INIT      7
4458 #define ANEG_STATE_ACK_DETECT           8
4459 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4460 #define ANEG_STATE_COMPLETE_ACK         10
4461 #define ANEG_STATE_IDLE_DETECT_INIT     11
4462 #define ANEG_STATE_IDLE_DETECT          12
4463 #define ANEG_STATE_LINK_OK              13
4464 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4465 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4466
4467         u32 flags;
4468 #define MR_AN_ENABLE            0x00000001
4469 #define MR_RESTART_AN           0x00000002
4470 #define MR_AN_COMPLETE          0x00000004
4471 #define MR_PAGE_RX              0x00000008
4472 #define MR_NP_LOADED            0x00000010
4473 #define MR_TOGGLE_TX            0x00000020
4474 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4475 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4476 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4477 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4478 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4479 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4480 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4481 #define MR_TOGGLE_RX            0x00002000
4482 #define MR_NP_RX                0x00004000
4483
4484 #define MR_LINK_OK              0x80000000
4485
4486         unsigned long link_time, cur_time;
4487
4488         u32 ability_match_cfg;
4489         int ability_match_count;
4490
4491         char ability_match, idle_match, ack_match;
4492
4493         u32 txconfig, rxconfig;
4494 #define ANEG_CFG_NP             0x00000080
4495 #define ANEG_CFG_ACK            0x00000040
4496 #define ANEG_CFG_RF2            0x00000020
4497 #define ANEG_CFG_RF1            0x00000010
4498 #define ANEG_CFG_PS2            0x00000001
4499 #define ANEG_CFG_PS1            0x00008000
4500 #define ANEG_CFG_HD             0x00004000
4501 #define ANEG_CFG_FD             0x00002000
4502 #define ANEG_CFG_INVAL          0x00001f06
4503
4504 };
4505 #define ANEG_OK         0
4506 #define ANEG_DONE       1
4507 #define ANEG_TIMER_ENAB 2
4508 #define ANEG_FAILED     -1
4509
4510 #define ANEG_STATE_SETTLE_TIME  10000
4511
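/* Software autonegotiation state machine for fiber (1000BASE-X style)
 * links when hardware autoneg is not in use.  Called repeatedly from
 * fiber_autoneg() until it returns ANEG_DONE or ANEG_FAILED.
 */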
4512 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4513                                    struct tg3_fiber_aneginfo *ap)
4514 {
4515         u16 flowctrl;
4516         unsigned long delta;
4517         u32 rx_cfg_reg;
4518         int ret;
4519
4520         if (ap->state == ANEG_STATE_UNKNOWN) {
4521                 ap->rxconfig = 0;
4522                 ap->link_time = 0;
4523                 ap->cur_time = 0;
4524                 ap->ability_match_cfg = 0;
4525                 ap->ability_match_count = 0;
4526                 ap->ability_match = 0;
4527                 ap->idle_match = 0;
4528                 ap->ack_match = 0;
4529         }
4530         ap->cur_time++;
4531
4532         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4533                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4534
4535                 if (rx_cfg_reg != ap->ability_match_cfg) {
4536                         ap->ability_match_cfg = rx_cfg_reg;
4537                         ap->ability_match = 0;
4538                         ap->ability_match_count = 0;
4539                 } else {
4540                         if (++ap->ability_match_count > 1) {
4541                                 ap->ability_match = 1;
4542                                 ap->ability_match_cfg = rx_cfg_reg;
4543                         }
4544                 }
4545                 if (rx_cfg_reg & ANEG_CFG_ACK)
4546                         ap->ack_match = 1;
4547                 else
4548                         ap->ack_match = 0;
4549
4550                 ap->idle_match = 0;
4551         } else {
4552                 ap->idle_match = 1;
4553                 ap->ability_match_cfg = 0;
4554                 ap->ability_match_count = 0;
4555                 ap->ability_match = 0;
4556                 ap->ack_match = 0;
4557
4558                 rx_cfg_reg = 0;
4559         }
4560
4561         ap->rxconfig = rx_cfg_reg;
4562         ret = ANEG_OK;
4563
4564         switch (ap->state) {
4565         case ANEG_STATE_UNKNOWN:
4566                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4567                         ap->state = ANEG_STATE_AN_ENABLE;
4568
4569                 /* fallthru */
4570         case ANEG_STATE_AN_ENABLE:
4571                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4572                 if (ap->flags & MR_AN_ENABLE) {
4573                         ap->link_time = 0;
4574                         ap->cur_time = 0;
4575                         ap->ability_match_cfg = 0;
4576                         ap->ability_match_count = 0;
4577                         ap->ability_match = 0;
4578                         ap->idle_match = 0;
4579                         ap->ack_match = 0;
4580
4581                         ap->state = ANEG_STATE_RESTART_INIT;
4582                 } else {
4583                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4584                 }
4585                 break;
4586
4587         case ANEG_STATE_RESTART_INIT:
4588                 ap->link_time = ap->cur_time;
4589                 ap->flags &= ~(MR_NP_LOADED);
4590                 ap->txconfig = 0;
4591                 tw32(MAC_TX_AUTO_NEG, 0);
4592                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4593                 tw32_f(MAC_MODE, tp->mac_mode);
4594                 udelay(40);
4595
4596                 ret = ANEG_TIMER_ENAB;
4597                 ap->state = ANEG_STATE_RESTART;
4598
4599                 /* fallthru */
4600         case ANEG_STATE_RESTART:
4601                 delta = ap->cur_time - ap->link_time;
4602                 if (delta > ANEG_STATE_SETTLE_TIME)
4603                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4604                 else
4605                         ret = ANEG_TIMER_ENAB;
4606                 break;
4607
4608         case ANEG_STATE_DISABLE_LINK_OK:
4609                 ret = ANEG_DONE;
4610                 break;
4611
4612         case ANEG_STATE_ABILITY_DETECT_INIT:
4613                 ap->flags &= ~(MR_TOGGLE_TX);
4614                 ap->txconfig = ANEG_CFG_FD;
4615                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4616                 if (flowctrl & ADVERTISE_1000XPAUSE)
4617                         ap->txconfig |= ANEG_CFG_PS1;
4618                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4619                         ap->txconfig |= ANEG_CFG_PS2;
4620                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4621                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4622                 tw32_f(MAC_MODE, tp->mac_mode);
4623                 udelay(40);
4624
4625                 ap->state = ANEG_STATE_ABILITY_DETECT;
4626                 break;
4627
4628         case ANEG_STATE_ABILITY_DETECT:
4629                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4630                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4631                 break;
4632
4633         case ANEG_STATE_ACK_DETECT_INIT:
4634                 ap->txconfig |= ANEG_CFG_ACK;
4635                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4636                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4637                 tw32_f(MAC_MODE, tp->mac_mode);
4638                 udelay(40);
4639
4640                 ap->state = ANEG_STATE_ACK_DETECT;
4641
4642                 /* fallthru */
4643         case ANEG_STATE_ACK_DETECT:
4644                 if (ap->ack_match != 0) {
4645                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4646                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4647                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4648                         } else {
4649                                 ap->state = ANEG_STATE_AN_ENABLE;
4650                         }
4651                 } else if (ap->ability_match != 0 &&
4652                            ap->rxconfig == 0) {
4653                         ap->state = ANEG_STATE_AN_ENABLE;
4654                 }
4655                 break;
4656
4657         case ANEG_STATE_COMPLETE_ACK_INIT:
4658                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4659                         ret = ANEG_FAILED;
4660                         break;
4661                 }
4662                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4663                                MR_LP_ADV_HALF_DUPLEX |
4664                                MR_LP_ADV_SYM_PAUSE |
4665                                MR_LP_ADV_ASYM_PAUSE |
4666                                MR_LP_ADV_REMOTE_FAULT1 |
4667                                MR_LP_ADV_REMOTE_FAULT2 |
4668                                MR_LP_ADV_NEXT_PAGE |
4669                                MR_TOGGLE_RX |
4670                                MR_NP_RX);
4671                 if (ap->rxconfig & ANEG_CFG_FD)
4672                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4673                 if (ap->rxconfig & ANEG_CFG_HD)
4674                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4675                 if (ap->rxconfig & ANEG_CFG_PS1)
4676                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4677                 if (ap->rxconfig & ANEG_CFG_PS2)
4678                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4679                 if (ap->rxconfig & ANEG_CFG_RF1)
4680                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4681                 if (ap->rxconfig & ANEG_CFG_RF2)
4682                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4683                 if (ap->rxconfig & ANEG_CFG_NP)
4684                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4685
4686                 ap->link_time = ap->cur_time;
4687
4688                 ap->flags ^= (MR_TOGGLE_TX);
4689                 if (ap->rxconfig & 0x0008)
4690                         ap->flags |= MR_TOGGLE_RX;
4691                 if (ap->rxconfig & ANEG_CFG_NP)
4692                         ap->flags |= MR_NP_RX;
4693                 ap->flags |= MR_PAGE_RX;
4694
4695                 ap->state = ANEG_STATE_COMPLETE_ACK;
4696                 ret = ANEG_TIMER_ENAB;
4697                 break;
4698
4699         case ANEG_STATE_COMPLETE_ACK:
4700                 if (ap->ability_match != 0 &&
4701                     ap->rxconfig == 0) {
4702                         ap->state = ANEG_STATE_AN_ENABLE;
4703                         break;
4704                 }
4705                 delta = ap->cur_time - ap->link_time;
4706                 if (delta > ANEG_STATE_SETTLE_TIME) {
4707                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4708                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4709                         } else {
4710                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4711                                     !(ap->flags & MR_NP_RX)) {
4712                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4713                                 } else {
4714                                         ret = ANEG_FAILED;
4715                                 }
4716                         }
4717                 }
4718                 break;
4719
4720         case ANEG_STATE_IDLE_DETECT_INIT:
4721                 ap->link_time = ap->cur_time;
4722                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4723                 tw32_f(MAC_MODE, tp->mac_mode);
4724                 udelay(40);
4725
4726                 ap->state = ANEG_STATE_IDLE_DETECT;
4727                 ret = ANEG_TIMER_ENAB;
4728                 break;
4729
4730         case ANEG_STATE_IDLE_DETECT:
4731                 if (ap->ability_match != 0 &&
4732                     ap->rxconfig == 0) {
4733                         ap->state = ANEG_STATE_AN_ENABLE;
4734                         break;
4735                 }
4736                 delta = ap->cur_time - ap->link_time;
4737                 if (delta > ANEG_STATE_SETTLE_TIME) {
4738                         /* XXX another gem from the Broadcom driver :( */
4739                         ap->state = ANEG_STATE_LINK_OK;
4740                 }
4741                 break;
4742
4743         case ANEG_STATE_LINK_OK:
4744                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4745                 ret = ANEG_DONE;
4746                 break;
4747
4748         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4749                 /* ??? unimplemented */
4750                 break;
4751
4752         case ANEG_STATE_NEXT_PAGE_WAIT:
4753                 /* ??? unimplemented */
4754                 break;
4755
4756         default:
4757                 ret = ANEG_FAILED;
4758                 break;
4759         }
4760
4761         return ret;
4762 }
4763
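/* Run the software autoneg state machine to completion, polling
 * roughly once per microsecond for up to ~195 ms.  Returns 1 on a
 * successful negotiation, 0 otherwise.
 */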
4764 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4765 {
4766         int res = 0;
4767         struct tg3_fiber_aneginfo aninfo;
4768         int status = ANEG_FAILED;
4769         unsigned int tick;
4770         u32 tmp;
4771
4772         tw32_f(MAC_TX_AUTO_NEG, 0);
4773
4774         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4775         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4776         udelay(40);
4777
4778         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4779         udelay(40);
4780
4781         memset(&aninfo, 0, sizeof(aninfo));
4782         aninfo.flags |= MR_AN_ENABLE;
4783         aninfo.state = ANEG_STATE_UNKNOWN;
4784         aninfo.cur_time = 0;
4785         tick = 0;
4786         while (++tick < 195000) {
4787                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4788                 if (status == ANEG_DONE || status == ANEG_FAILED)
4789                         break;
4790
4791                 udelay(1);
4792         }
4793
4794         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4795         tw32_f(MAC_MODE, tp->mac_mode);
4796         udelay(40);
4797
4798         *txflags = aninfo.txconfig;
4799         *rxflags = aninfo.flags;
4800
4801         if (status == ANEG_DONE &&
4802             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4803                              MR_LP_ADV_FULL_DUPLEX)))
4804                 res = 1;
4805
4806         return res;
4807 }
4808
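/* One-time hardware setup for the BCM8002 SerDes PHY: reset the part,
 * program the PLL lock range and auto-lock/comdet settings, and cycle
 * POR before the device is used.
 */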
4809 static void tg3_init_bcm8002(struct tg3 *tp)
4810 {
4811         u32 mac_status = tr32(MAC_STATUS);
4812         int i;
4813
4814         /* Reset only when initializing for the first time or when we have a link. */
4815         if (tg3_flag(tp, INIT_COMPLETE) &&
4816             !(mac_status & MAC_STATUS_PCS_SYNCED))
4817                 return;
4818
4819         /* Set PLL lock range. */
4820         tg3_writephy(tp, 0x16, 0x8007);
4821
4822         /* SW reset */
4823         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4824
4825         /* Wait for reset to complete. */
4826         /* XXX schedule_timeout() ... */
4827         for (i = 0; i < 500; i++)
4828                 udelay(10);
4829
4830         /* Config mode; select PMA/Ch 1 regs. */
4831         tg3_writephy(tp, 0x10, 0x8411);
4832
4833         /* Enable auto-lock and comdet, select txclk for tx. */
4834         tg3_writephy(tp, 0x11, 0x0a10);
4835
4836         tg3_writephy(tp, 0x18, 0x00a0);
4837         tg3_writephy(tp, 0x16, 0x41ff);
4838
4839         /* Assert and deassert POR. */
4840         tg3_writephy(tp, 0x13, 0x0400);
4841         udelay(40);
4842         tg3_writephy(tp, 0x13, 0x0000);
4843
4844         tg3_writephy(tp, 0x11, 0x0a50);
4845         udelay(40);
4846         tg3_writephy(tp, 0x11, 0x0a10);
4847
4848         /* Wait for signal to stabilize */
4849         /* XXX schedule_timeout() ... */
4850         for (i = 0; i < 15000; i++)
4851                 udelay(10);
4852
4853         /* Deselect the channel register so we can read the PHYID
4854          * later.
4855          */
4856         tg3_writephy(tp, 0x10, 0x8011);
4857 }
4858
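/* Fiber link setup using the hardware SG-DIG autoneg block.
 * Returns nonzero if the link is up.
 */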
4859 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4860 {
4861         u16 flowctrl;
4862         u32 sg_dig_ctrl, sg_dig_status;
4863         u32 serdes_cfg, expected_sg_dig_ctrl;
4864         int workaround, port_a;
4865         int current_link_up;
4866
4867         serdes_cfg = 0;
4868         expected_sg_dig_ctrl = 0;
4869         workaround = 0;
4870         port_a = 1;
4871         current_link_up = 0;
4872
4873         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4874             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4875                 workaround = 1;
4876                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4877                         port_a = 0;
4878
4879                 /* Preserve bits 0-11, 13 and 14 for signal pre-emphasis,
4880                  * and bits 20-23 for the voltage regulator. */
4881                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4882         }
4883
4884         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4885
4886         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4887                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4888                         if (workaround) {
4889                                 u32 val = serdes_cfg;
4890
4891                                 if (port_a)
4892                                         val |= 0xc010000;
4893                                 else
4894                                         val |= 0x4010000;
4895                                 tw32_f(MAC_SERDES_CFG, val);
4896                         }
4897
4898                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4899                 }
4900                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4901                         tg3_setup_flow_control(tp, 0, 0);
4902                         current_link_up = 1;
4903                 }
4904                 goto out;
4905         }
4906
4907         /* Want auto-negotiation.  */
4908         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4909
4910         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4911         if (flowctrl & ADVERTISE_1000XPAUSE)
4912                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4913         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4914                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4915
4916         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4917                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4918                     tp->serdes_counter &&
4919                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4920                                     MAC_STATUS_RCVD_CFG)) ==
4921                      MAC_STATUS_PCS_SYNCED)) {
4922                         tp->serdes_counter--;
4923                         current_link_up = 1;
4924                         goto out;
4925                 }
4926 restart_autoneg:
4927                 if (workaround)
4928                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4929                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4930                 udelay(5);
4931                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4932
4933                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4934                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4935         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4936                                  MAC_STATUS_SIGNAL_DET)) {
4937                 sg_dig_status = tr32(SG_DIG_STATUS);
4938                 mac_status = tr32(MAC_STATUS);
4939
4940                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4941                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4942                         u32 local_adv = 0, remote_adv = 0;
4943
4944                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4945                                 local_adv |= ADVERTISE_1000XPAUSE;
4946                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4947                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4948
4949                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4950                                 remote_adv |= LPA_1000XPAUSE;
4951                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4952                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4953
4954                         tp->link_config.rmt_adv =
4955                                            mii_adv_to_ethtool_adv_x(remote_adv);
4956
4957                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4958                         current_link_up = 1;
4959                         tp->serdes_counter = 0;
4960                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4961                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4962                         if (tp->serdes_counter)
4963                                 tp->serdes_counter--;
4964                         else {
4965                                 if (workaround) {
4966                                         u32 val = serdes_cfg;
4967
4968                                         if (port_a)
4969                                                 val |= 0xc010000;
4970                                         else
4971                                                 val |= 0x4010000;
4972
4973                                         tw32_f(MAC_SERDES_CFG, val);
4974                                 }
4975
4976                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4977                                 udelay(40);
4978
4979                                 /* Link parallel detection: link is up
4980                                  * only if we have PCS_SYNC and are not
4981                                  * receiving config code words. */
4982                                 mac_status = tr32(MAC_STATUS);
4983                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4984                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4985                                         tg3_setup_flow_control(tp, 0, 0);
4986                                         current_link_up = 1;
4987                                         tp->phy_flags |=
4988                                                 TG3_PHYFLG_PARALLEL_DETECT;
4989                                         tp->serdes_counter =
4990                                                 SERDES_PARALLEL_DET_TIMEOUT;
4991                                 } else
4992                                         goto restart_autoneg;
4993                         }
4994                 }
4995         } else {
4996                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4997                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4998         }
4999
5000 out:
5001         return current_link_up;
5002 }
5003
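/* Fiber link setup when hardware autoneg is unavailable: run the
 * software autoneg state machine, or force a 1000FD link when autoneg
 * is disabled.  Returns nonzero if the link is up.
 */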
5004 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5005 {
5006         int current_link_up = 0;
5007
5008         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5009                 goto out;
5010
5011         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5012                 u32 txflags, rxflags;
5013                 int i;
5014
5015                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5016                         u32 local_adv = 0, remote_adv = 0;
5017
5018                         if (txflags & ANEG_CFG_PS1)
5019                                 local_adv |= ADVERTISE_1000XPAUSE;
5020                         if (txflags & ANEG_CFG_PS2)
5021                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5022
5023                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5024                                 remote_adv |= LPA_1000XPAUSE;
5025                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5026                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5027
5028                         tp->link_config.rmt_adv =
5029                                            mii_adv_to_ethtool_adv_x(remote_adv);
5030
5031                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5032
5033                         current_link_up = 1;
5034                 }
5035                 for (i = 0; i < 30; i++) {
5036                         udelay(20);
5037                         tw32_f(MAC_STATUS,
5038                                (MAC_STATUS_SYNC_CHANGED |
5039                                 MAC_STATUS_CFG_CHANGED));
5040                         udelay(40);
5041                         if ((tr32(MAC_STATUS) &
5042                              (MAC_STATUS_SYNC_CHANGED |
5043                               MAC_STATUS_CFG_CHANGED)) == 0)
5044                                 break;
5045                 }
5046
5047                 mac_status = tr32(MAC_STATUS);
5048                 if (current_link_up == 0 &&
5049                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5050                     !(mac_status & MAC_STATUS_RCVD_CFG))
5051                         current_link_up = 1;
5052         } else {
5053                 tg3_setup_flow_control(tp, 0, 0);
5054
5055                 /* Forcing 1000FD link up. */
5056                 current_link_up = 1;
5057
5058                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5059                 udelay(40);
5060
5061                 tw32_f(MAC_MODE, tp->mac_mode);
5062                 udelay(40);
5063         }
5064
5065 out:
5066         return current_link_up;
5067 }
5068
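/* Top-level link setup for TBI (fiber) devices.  Link state changes
 * are propagated via the carrier state and tg3_link_report().
 */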
5069 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5070 {
5071         u32 orig_pause_cfg;
5072         u16 orig_active_speed;
5073         u8 orig_active_duplex;
5074         u32 mac_status;
5075         int current_link_up;
5076         int i;
5077
5078         orig_pause_cfg = tp->link_config.active_flowctrl;
5079         orig_active_speed = tp->link_config.active_speed;
5080         orig_active_duplex = tp->link_config.active_duplex;
5081
5082         if (!tg3_flag(tp, HW_AUTONEG) &&
5083             netif_carrier_ok(tp->dev) &&
5084             tg3_flag(tp, INIT_COMPLETE)) {
5085                 mac_status = tr32(MAC_STATUS);
5086                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5087                                MAC_STATUS_SIGNAL_DET |
5088                                MAC_STATUS_CFG_CHANGED |
5089                                MAC_STATUS_RCVD_CFG);
5090                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5091                                    MAC_STATUS_SIGNAL_DET)) {
5092                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5093                                             MAC_STATUS_CFG_CHANGED));
5094                         return 0;
5095                 }
5096         }
5097
5098         tw32_f(MAC_TX_AUTO_NEG, 0);
5099
5100         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5101         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5102         tw32_f(MAC_MODE, tp->mac_mode);
5103         udelay(40);
5104
5105         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5106                 tg3_init_bcm8002(tp);
5107
5108         /* Enable link change events even while serdes polling is active.  */
5109         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5110         udelay(40);
5111
5112         current_link_up = 0;
5113         tp->link_config.rmt_adv = 0;
5114         mac_status = tr32(MAC_STATUS);
5115
5116         if (tg3_flag(tp, HW_AUTONEG))
5117                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5118         else
5119                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5120
5121         tp->napi[0].hw_status->status =
5122                 (SD_STATUS_UPDATED |
5123                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5124
5125         for (i = 0; i < 100; i++) {
5126                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5127                                     MAC_STATUS_CFG_CHANGED));
5128                 udelay(5);
5129                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5130                                          MAC_STATUS_CFG_CHANGED |
5131                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5132                         break;
5133         }
5134
5135         mac_status = tr32(MAC_STATUS);
5136         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5137                 current_link_up = 0;
5138                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5139                     tp->serdes_counter == 0) {
5140                         tw32_f(MAC_MODE, (tp->mac_mode |
5141                                           MAC_MODE_SEND_CONFIGS));
5142                         udelay(1);
5143                         tw32_f(MAC_MODE, tp->mac_mode);
5144                 }
5145         }
5146
5147         if (current_link_up == 1) {
5148                 tp->link_config.active_speed = SPEED_1000;
5149                 tp->link_config.active_duplex = DUPLEX_FULL;
5150                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5151                                     LED_CTRL_LNKLED_OVERRIDE |
5152                                     LED_CTRL_1000MBPS_ON));
5153         } else {
5154                 tp->link_config.active_speed = SPEED_UNKNOWN;
5155                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5156                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5157                                     LED_CTRL_LNKLED_OVERRIDE |
5158                                     LED_CTRL_TRAFFIC_OVERRIDE));
5159         }
5160
5161         if (current_link_up != netif_carrier_ok(tp->dev)) {
5162                 if (current_link_up)
5163                         netif_carrier_on(tp->dev);
5164                 else
5165                         netif_carrier_off(tp->dev);
5166                 tg3_link_report(tp);
5167         } else {
5168                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5169                 if (orig_pause_cfg != now_pause_cfg ||
5170                     orig_active_speed != tp->link_config.active_speed ||
5171                     orig_active_duplex != tp->link_config.active_duplex)
5172                         tg3_link_report(tp);
5173         }
5174
5175         return 0;
5176 }
5177
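/* Link setup for fiber devices behind an MII-style SerDes interface
 * (e.g. 5714S-class parts).
 */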
5178 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5179 {
5180         int current_link_up, err = 0;
5181         u32 bmsr, bmcr;
5182         u16 current_speed;
5183         u8 current_duplex;
5184         u32 local_adv, remote_adv;
5185
5186         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5187         tw32_f(MAC_MODE, tp->mac_mode);
5188         udelay(40);
5189
5190         tw32(MAC_EVENT, 0);
5191
5192         tw32_f(MAC_STATUS,
5193              (MAC_STATUS_SYNC_CHANGED |
5194               MAC_STATUS_CFG_CHANGED |
5195               MAC_STATUS_MI_COMPLETION |
5196               MAC_STATUS_LNKSTATE_CHANGED));
5197         udelay(40);
5198
5199         if (force_reset)
5200                 tg3_phy_reset(tp);
5201
5202         current_link_up = 0;
5203         current_speed = SPEED_UNKNOWN;
5204         current_duplex = DUPLEX_UNKNOWN;
5205         tp->link_config.rmt_adv = 0;
5206
5207         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5208         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5209         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5210                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5211                         bmsr |= BMSR_LSTATUS;
5212                 else
5213                         bmsr &= ~BMSR_LSTATUS;
5214         }
5215
5216         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5217
5218         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5219             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5220                 /* do nothing, just check for link up at the end */
5221         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5222                 u32 adv, newadv;
5223
5224                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5225                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5226                                  ADVERTISE_1000XPAUSE |
5227                                  ADVERTISE_1000XPSE_ASYM |
5228                                  ADVERTISE_SLCT);
5229
5230                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5231                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5232
5233                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5234                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5235                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5236                         tg3_writephy(tp, MII_BMCR, bmcr);
5237
5238                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5239                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5240                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5241
5242                         return err;
5243                 }
5244         } else {
5245                 u32 new_bmcr;
5246
5247                 bmcr &= ~BMCR_SPEED1000;
5248                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5249
5250                 if (tp->link_config.duplex == DUPLEX_FULL)
5251                         new_bmcr |= BMCR_FULLDPLX;
5252
5253                 if (new_bmcr != bmcr) {
5254                         /* BMCR_SPEED1000 is a reserved bit that needs
5255                          * to be set on write.
5256                          */
5257                         new_bmcr |= BMCR_SPEED1000;
5258
5259                         /* Force a link down */
5260                         if (netif_carrier_ok(tp->dev)) {
5261                                 u32 adv;
5262
5263                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5264                                 adv &= ~(ADVERTISE_1000XFULL |
5265                                          ADVERTISE_1000XHALF |
5266                                          ADVERTISE_SLCT);
5267                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5268                                 tg3_writephy(tp, MII_BMCR, bmcr |
5269                                                            BMCR_ANRESTART |
5270                                                            BMCR_ANENABLE);
5271                                 udelay(10);
5272                                 netif_carrier_off(tp->dev);
5273                         }
5274                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5275                         bmcr = new_bmcr;
5276                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5277                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5278                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5279                             ASIC_REV_5714) {
5280                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5281                                         bmsr |= BMSR_LSTATUS;
5282                                 else
5283                                         bmsr &= ~BMSR_LSTATUS;
5284                         }
5285                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5286                 }
5287         }
5288
5289         if (bmsr & BMSR_LSTATUS) {
5290                 current_speed = SPEED_1000;
5291                 current_link_up = 1;
5292                 if (bmcr & BMCR_FULLDPLX)
5293                         current_duplex = DUPLEX_FULL;
5294                 else
5295                         current_duplex = DUPLEX_HALF;
5296
5297                 local_adv = 0;
5298                 remote_adv = 0;
5299
5300                 if (bmcr & BMCR_ANENABLE) {
5301                         u32 common;
5302
5303                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5304                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5305                         common = local_adv & remote_adv;
5306                         if (common & (ADVERTISE_1000XHALF |
5307                                       ADVERTISE_1000XFULL)) {
5308                                 if (common & ADVERTISE_1000XFULL)
5309                                         current_duplex = DUPLEX_FULL;
5310                                 else
5311                                         current_duplex = DUPLEX_HALF;
5312
5313                                 tp->link_config.rmt_adv =
5314                                            mii_adv_to_ethtool_adv_x(remote_adv);
5315                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5316                                 /* Link is up via parallel detect */
5317                         } else {
5318                                 current_link_up = 0;
5319                         }
5320                 }
5321         }
5322
5323         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5324                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5325
5326         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5327         if (tp->link_config.active_duplex == DUPLEX_HALF)
5328                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5329
5330         tw32_f(MAC_MODE, tp->mac_mode);
5331         udelay(40);
5332
5333         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5334
5335         tp->link_config.active_speed = current_speed;
5336         tp->link_config.active_duplex = current_duplex;
5337
5338         if (current_link_up != netif_carrier_ok(tp->dev)) {
5339                 if (current_link_up)
5340                         netif_carrier_on(tp->dev);
5341                 else {
5342                         netif_carrier_off(tp->dev);
5343                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5344                 }
5345                 tg3_link_report(tp);
5346         }
5347         return err;
5348 }
5349
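/* Periodically check whether a SerDes link whose partner cannot
 * autoneg should be brought up via parallel detection, and drop back
 * to autoneg once config code words are seen again.
 */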
5350 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5351 {
5352         if (tp->serdes_counter) {
5353                 /* Give autoneg time to complete. */
5354                 tp->serdes_counter--;
5355                 return;
5356         }
5357
5358         if (!netif_carrier_ok(tp->dev) &&
5359             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5360                 u32 bmcr;
5361
5362                 tg3_readphy(tp, MII_BMCR, &bmcr);
5363                 if (bmcr & BMCR_ANENABLE) {
5364                         u32 phy1, phy2;
5365
5366                         /* Select shadow register 0x1f */
5367                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5368                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5369
5370                         /* Select expansion interrupt status register */
5371                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5372                                          MII_TG3_DSP_EXP1_INT_STAT);
5373                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5374                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5375
5376                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5377                                 /* We have signal detect and not receiving
5378                                  * config code words, link is up by parallel
5379                                  * detection.
5380                                  */
5381
5382                                 bmcr &= ~BMCR_ANENABLE;
5383                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5384                                 tg3_writephy(tp, MII_BMCR, bmcr);
5385                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5386                         }
5387                 }
5388         } else if (netif_carrier_ok(tp->dev) &&
5389                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5390                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5391                 u32 phy2;
5392
5393                 /* Select expansion interrupt status register */
5394                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5395                                  MII_TG3_DSP_EXP1_INT_STAT);
5396                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5397                 if (phy2 & 0x20) {
5398                         u32 bmcr;
5399
5400                         /* Config code words received, turn on autoneg. */
5401                         tg3_readphy(tp, MII_BMCR, &bmcr);
5402                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5403
5404                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5405
5406                 }
5407         }
5408 }
5409
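/* Top-level PHY setup dispatcher: picks the fiber, fiber-MII or
 * copper setup routine, then updates the clock prescaler, TX lengths
 * and coalescing/ASPM settings that depend on the link state.
 */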
5410 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5411 {
5412         u32 val;
5413         int err;
5414
5415         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5416                 err = tg3_setup_fiber_phy(tp, force_reset);
5417         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5418                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5419         else
5420                 err = tg3_setup_copper_phy(tp, force_reset);
5421
5422         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5423                 u32 scale;
5424
5425                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5426                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5427                         scale = 65;
5428                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5429                         scale = 6;
5430                 else
5431                         scale = 12;
5432
5433                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5434                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5435                 tw32(GRC_MISC_CFG, val);
5436         }
5437
5438         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5439               (6 << TX_LENGTHS_IPG_SHIFT);
5440         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5441                 val |= tr32(MAC_TX_LENGTHS) &
5442                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5443                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5444
5445         if (tp->link_config.active_speed == SPEED_1000 &&
5446             tp->link_config.active_duplex == DUPLEX_HALF)
5447                 tw32(MAC_TX_LENGTHS, val |
5448                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5449         else
5450                 tw32(MAC_TX_LENGTHS, val |
5451                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5452
5453         if (!tg3_flag(tp, 5705_PLUS)) {
5454                 if (netif_carrier_ok(tp->dev)) {
5455                         tw32(HOSTCC_STAT_COAL_TICKS,
5456                              tp->coal.stats_block_coalesce_usecs);
5457                 } else {
5458                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5459                 }
5460         }
5461
5462         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5463                 val = tr32(PCIE_PWR_MGMT_THRESH);
5464                 if (!netif_carrier_ok(tp->dev))
5465                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5466                               tp->pwrmgmt_thresh;
5467                 else
5468                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5469                 tw32(PCIE_PWR_MGMT_THRESH, val);
5470         }
5471
5472         return err;
5473 }
5474
5475 static inline int tg3_irq_sync(struct tg3 *tp)
5476 {
5477         return tp->irq_sync;
5478 }
5479
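/* Read a block of contiguous registers starting at offset @off into
 * the matching offset of the @dst buffer.
 */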
5480 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5481 {
5482         int i;
5483
5484         dst = (u32 *)((u8 *)dst + off);
5485         for (i = 0; i < len; i += sizeof(u32))
5486                 *dst++ = tr32(off + i);
5487 }
5488
5489 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5490 {
5491         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5492         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5493         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5494         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5495         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5496         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5497         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5498         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5499         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5500         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5501         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5502         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5503         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5504         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5505         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5506         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5507         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5508         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5509         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5510
5511         if (tg3_flag(tp, SUPPORT_MSIX))
5512                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5513
5514         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5515         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5516         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5517         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5518         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5519         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5520         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5521         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5522
5523         if (!tg3_flag(tp, 5705_PLUS)) {
5524                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5525                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5526                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5527         }
5528
5529         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5530         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5531         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5532         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5533         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5534
5535         if (tg3_flag(tp, NVRAM))
5536                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5537 }
5538
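/* Dump the device register block and per-vector status block / NAPI
 * state to the kernel log for debugging.
 */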
5539 static void tg3_dump_state(struct tg3 *tp)
5540 {
5541         int i;
5542         u32 *regs;
5543
5544         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5545         if (!regs) {
5546                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5547                 return;
5548         }
5549
5550         if (tg3_flag(tp, PCI_EXPRESS)) {
5551                 /* Read up to but not including private PCI registers */
5552                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5553                         regs[i / sizeof(u32)] = tr32(i);
5554         } else
5555                 tg3_dump_legacy_regs(tp, regs);
5556
5557         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5558                 if (!regs[i + 0] && !regs[i + 1] &&
5559                     !regs[i + 2] && !regs[i + 3])
5560                         continue;
5561
5562                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5563                            i * 4,
5564                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5565         }
5566
5567         kfree(regs);
5568
5569         for (i = 0; i < tp->irq_cnt; i++) {
5570                 struct tg3_napi *tnapi = &tp->napi[i];
5571
5572                 /* SW status block */
5573                 netdev_err(tp->dev,
5574                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5575                            i,
5576                            tnapi->hw_status->status,
5577                            tnapi->hw_status->status_tag,
5578                            tnapi->hw_status->rx_jumbo_consumer,
5579                            tnapi->hw_status->rx_consumer,
5580                            tnapi->hw_status->rx_mini_consumer,
5581                            tnapi->hw_status->idx[0].rx_producer,
5582                            tnapi->hw_status->idx[0].tx_consumer);
5583
5584                 netdev_err(tp->dev,
5585                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5586                            i,
5587                            tnapi->last_tag, tnapi->last_irq_tag,
5588                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5589                            tnapi->rx_rcb_ptr,
5590                            tnapi->prodring.rx_std_prod_idx,
5591                            tnapi->prodring.rx_std_cons_idx,
5592                            tnapi->prodring.rx_jmb_prod_idx,
5593                            tnapi->prodring.rx_jmb_cons_idx);
5594         }
5595 }
5596
5597 /* This is called whenever we suspect that the system chipset is re-
5598  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5599  * is bogus tx completions. We try to recover by setting the
5600  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5601  * in the workqueue.
5602  */
5603 static void tg3_tx_recover(struct tg3 *tp)
5604 {
5605         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5606                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5607
5608         netdev_warn(tp->dev,
5609                     "The system may be re-ordering memory-mapped I/O "
5610                     "cycles to the network device, attempting to recover. "
5611                     "Please report the problem to the driver maintainer "
5612                     "and include system chipset information.\n");
5613
5614         spin_lock(&tp->lock);
5615         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5616         spin_unlock(&tp->lock);
5617 }
5618
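/* Number of free tx descriptors in the ring, computed from the
 * producer/consumer indices modulo the ring size.
 */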
5619 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5620 {
5621         /* Tell compiler to fetch tx indices from memory. */
5622         barrier();
5623         return tnapi->tx_pending -
5624                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5625 }
5626
5627 /* Tigon3 never reports partial packet sends.  So we do not
5628  * need special logic to handle SKBs that have not had all
5629  * of their frags sent yet, like SunGEM does.
5630  */
5631 static void tg3_tx(struct tg3_napi *tnapi)
5632 {
5633         struct tg3 *tp = tnapi->tp;
5634         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5635         u32 sw_idx = tnapi->tx_cons;
5636         struct netdev_queue *txq;
5637         int index = tnapi - tp->napi;
5638         unsigned int pkts_compl = 0, bytes_compl = 0;
5639
5640         if (tg3_flag(tp, ENABLE_TSS))
5641                 index--;
5642
5643         txq = netdev_get_tx_queue(tp->dev, index);
5644
5645         while (sw_idx != hw_idx) {
5646                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5647                 struct sk_buff *skb = ri->skb;
5648                 int i, tx_bug = 0;
5649
5650                 if (unlikely(skb == NULL)) {
5651                         tg3_tx_recover(tp);
5652                         return;
5653                 }
5654
5655                 pci_unmap_single(tp->pdev,
5656                                  dma_unmap_addr(ri, mapping),
5657                                  skb_headlen(skb),
5658                                  PCI_DMA_TODEVICE);
5659
5660                 ri->skb = NULL;
5661
5662                 while (ri->fragmented) {
5663                         ri->fragmented = false;
5664                         sw_idx = NEXT_TX(sw_idx);
5665                         ri = &tnapi->tx_buffers[sw_idx];
5666                 }
5667
5668                 sw_idx = NEXT_TX(sw_idx);
5669
5670                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5671                         ri = &tnapi->tx_buffers[sw_idx];
5672                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5673                                 tx_bug = 1;
5674
5675                         pci_unmap_page(tp->pdev,
5676                                        dma_unmap_addr(ri, mapping),
5677                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5678                                        PCI_DMA_TODEVICE);
5679
5680                         while (ri->fragmented) {
5681                                 ri->fragmented = false;
5682                                 sw_idx = NEXT_TX(sw_idx);
5683                                 ri = &tnapi->tx_buffers[sw_idx];
5684                         }
5685
5686                         sw_idx = NEXT_TX(sw_idx);
5687                 }
5688
5689                 pkts_compl++;
5690                 bytes_compl += skb->len;
5691
5692                 dev_kfree_skb(skb);
5693
5694                 if (unlikely(tx_bug)) {
5695                         tg3_tx_recover(tp);
5696                         return;
5697                 }
5698         }
5699
5700         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5701
5702         tnapi->tx_cons = sw_idx;
5703
5704         /* Need to make the tx_cons update visible to tg3_start_xmit()
5705          * before checking for netif_queue_stopped().  Without the
5706          * memory barrier, there is a small possibility that tg3_start_xmit()
5707          * will miss it and cause the queue to be stopped forever.
5708          */
5709         smp_mb();
5710
5711         if (unlikely(netif_tx_queue_stopped(txq) &&
5712                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5713                 __netif_tx_lock(txq, smp_processor_id());
5714                 if (netif_tx_queue_stopped(txq) &&
5715                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5716                         netif_tx_wake_queue(txq);
5717                 __netif_tx_unlock(txq);
5718         }
5719 }
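
/* The smp_mb() above pairs with a matching barrier on the producer
 * side.  A minimal sketch of the two-sided protocol, in hypothetical
 * shorthand rather than the driver's exact code:
 *
 *	tg3_tx() (consumer)             tg3_start_xmit() (producer)
 *	-------------------             ---------------------------
 *	tnapi->tx_cons = sw_idx;        netif_tx_stop_queue(txq);
 *	smp_mb();                       smp_mb();
 *	if (queue stopped &&            if (tg3_tx_avail(tnapi) >
 *	    avail > wakeup thresh)          wakeup thresh)
 *		wake queue;                     wake queue;
 *
 * Whichever side executes second is guaranteed to observe the other's
 * update, so the queue cannot stay stopped while space is available.
 */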
5720
5721 static void tg3_frag_free(bool is_frag, void *data)
5722 {
5723         if (is_frag)
5724                 put_page(virt_to_head_page(data));
5725         else
5726                 kfree(data);
5727 }
5728
5729 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5730 {
5731         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5732                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5733
5734         if (!ri->data)
5735                 return;
5736
5737         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5738                          map_sz, PCI_DMA_FROMDEVICE);
5739         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5740         ri->data = NULL;
5741 }
5742
5743
5744 /* Returns size of skb allocated or < 0 on error.
5745  *
5746  * We only need to fill in the address because the other members
5747  * of the RX descriptor are invariant, see tg3_init_rings.
5748  *
5749  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5750  * posting buffers we only dirty the first cache line of the RX
5751  * descriptor (containing the address).  Whereas for the RX status
5752  * buffers the cpu only reads the last cache line of the RX descriptor
5753  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5754  */
5755 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5756                              u32 opaque_key, u32 dest_idx_unmasked,
5757                              unsigned int *frag_size)
5758 {
5759         struct tg3_rx_buffer_desc *desc;
5760         struct ring_info *map;
5761         u8 *data;
5762         dma_addr_t mapping;
5763         int skb_size, data_size, dest_idx;
5764
5765         switch (opaque_key) {
5766         case RXD_OPAQUE_RING_STD:
5767                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5768                 desc = &tpr->rx_std[dest_idx];
5769                 map = &tpr->rx_std_buffers[dest_idx];
5770                 data_size = tp->rx_pkt_map_sz;
5771                 break;
5772
5773         case RXD_OPAQUE_RING_JUMBO:
5774                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5775                 desc = &tpr->rx_jmb[dest_idx].std;
5776                 map = &tpr->rx_jmb_buffers[dest_idx];
5777                 data_size = TG3_RX_JMB_MAP_SZ;
5778                 break;
5779
5780         default:
5781                 return -EINVAL;
5782         }
5783
5784         /* Do not overwrite any of the map or rp information
5785          * until we are sure we can commit to a new buffer.
5786          *
5787          * Callers depend upon this behavior and assume that
5788          * we leave everything unchanged if we fail.
5789          */
5790         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5791                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5792         if (skb_size <= PAGE_SIZE) {
5793                 data = netdev_alloc_frag(skb_size);
5794                 *frag_size = skb_size;
5795         } else {
5796                 data = kmalloc(skb_size, GFP_ATOMIC);
5797                 *frag_size = 0;
5798         }
5799         if (!data)
5800                 return -ENOMEM;
5801
5802         mapping = pci_map_single(tp->pdev,
5803                                  data + TG3_RX_OFFSET(tp),
5804                                  data_size,
5805                                  PCI_DMA_FROMDEVICE);
5806         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5807                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5808                 return -EIO;
5809         }
5810
5811         map->data = data;
5812         dma_unmap_addr_set(map, mapping, mapping);
5813
5814         desc->addr_hi = ((u64)mapping >> 32);
5815         desc->addr_lo = ((u64)mapping & 0xffffffff);
5816
5817         return data_size;
5818 }
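
/* A worked sizing example for the allocation above, assuming a
 * hypothetical rx_pkt_map_sz of 1536 bytes, a small TG3_RX_OFFSET()
 * and a 4096-byte PAGE_SIZE: skb_size comes out well under PAGE_SIZE,
 * so the page-fragment allocator (netdev_alloc_frag) is used and
 * *frag_size is set non-zero so that tg3_frag_free() later knows to
 * put_page() instead of kfree().  Jumbo buffers (TG3_RX_JMB_MAP_SZ)
 * typically exceed PAGE_SIZE and fall back to kmalloc(GFP_ATOMIC).
 */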
5819
5820 /* We only need to move over in the address because the other
5821  * members of the RX descriptor are invariant.  See notes above
5822  * tg3_alloc_rx_data for full details.
5823  */
5824 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5825                            struct tg3_rx_prodring_set *dpr,
5826                            u32 opaque_key, int src_idx,
5827                            u32 dest_idx_unmasked)
5828 {
5829         struct tg3 *tp = tnapi->tp;
5830         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5831         struct ring_info *src_map, *dest_map;
5832         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5833         int dest_idx;
5834
5835         switch (opaque_key) {
5836         case RXD_OPAQUE_RING_STD:
5837                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5838                 dest_desc = &dpr->rx_std[dest_idx];
5839                 dest_map = &dpr->rx_std_buffers[dest_idx];
5840                 src_desc = &spr->rx_std[src_idx];
5841                 src_map = &spr->rx_std_buffers[src_idx];
5842                 break;
5843
5844         case RXD_OPAQUE_RING_JUMBO:
5845                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5846                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5847                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5848                 src_desc = &spr->rx_jmb[src_idx].std;
5849                 src_map = &spr->rx_jmb_buffers[src_idx];
5850                 break;
5851
5852         default:
5853                 return;
5854         }
5855
5856         dest_map->data = src_map->data;
5857         dma_unmap_addr_set(dest_map, mapping,
5858                            dma_unmap_addr(src_map, mapping));
5859         dest_desc->addr_hi = src_desc->addr_hi;
5860         dest_desc->addr_lo = src_desc->addr_lo;
5861
5862         /* Ensure that the update to the skb happens after the physical
5863          * addresses have been transferred to the new BD location.
5864          */
5865         smp_wmb();
5866
5867         src_map->data = NULL;
5868 }
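
/* The smp_wmb() above orders the publication of the recycled buffer
 * (dest_map->data and the descriptor address) before src_map->data is
 * cleared; the smp_rmb() calls in tg3_rx_prodring_xfer(), which test
 * the ->data pointers before copying descriptors, read in the matching
 * order.
 */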
5869
5870 /* The RX ring scheme is composed of multiple rings which post fresh
5871  * buffers to the chip, and one special ring the chip uses to report
5872  * status back to the host.
5873  *
5874  * The special ring reports the status of received packets to the
5875  * host.  The chip does not write into the original descriptor the
5876  * RX buffer was obtained from.  The chip simply takes the original
5877  * descriptor as provided by the host, updates the status and length
5878  * field, then writes this into the next status ring entry.
5879  *
5880  * Each ring the host uses to post buffers to the chip is described
5881  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
5882  * it is first placed into the on-chip ram.  When the packet's length
5883  * is known, it walks down the TG3_BDINFO entries to select the ring.
5884  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5885  * whose MAXLEN covers the new packet's length is chosen.
5886  *
5887  * The "separate ring for rx status" scheme may sound odd, but it makes
5888  * sense from a cache coherency perspective.  If only the host writes
5889  * to the buffer post rings, and only the chip writes to the rx status
5890  * rings, then cache lines never move beyond shared-modified state.
5891  * If both the host and chip were to write into the same ring, cache line
5892  * eviction could occur since both entities want it in an exclusive state.
5893  */
5894 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5895 {
5896         struct tg3 *tp = tnapi->tp;
5897         u32 work_mask, rx_std_posted = 0;
5898         u32 std_prod_idx, jmb_prod_idx;
5899         u32 sw_idx = tnapi->rx_rcb_ptr;
5900         u16 hw_idx;
5901         int received;
5902         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5903
5904         hw_idx = *(tnapi->rx_rcb_prod_idx);
5905         /*
5906          * We need to order the read of hw_idx and the read of
5907          * the opaque cookie.
5908          */
5909         rmb();
5910         work_mask = 0;
5911         received = 0;
5912         std_prod_idx = tpr->rx_std_prod_idx;
5913         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5914         while (sw_idx != hw_idx && budget > 0) {
5915                 struct ring_info *ri;
5916                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5917                 unsigned int len;
5918                 struct sk_buff *skb;
5919                 dma_addr_t dma_addr;
5920                 u32 opaque_key, desc_idx, *post_ptr;
5921                 u8 *data;
5922
5923                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5924                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5925                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5926                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5927                         dma_addr = dma_unmap_addr(ri, mapping);
5928                         data = ri->data;
5929                         post_ptr = &std_prod_idx;
5930                         rx_std_posted++;
5931                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5932                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5933                         dma_addr = dma_unmap_addr(ri, mapping);
5934                         data = ri->data;
5935                         post_ptr = &jmb_prod_idx;
5936                 } else
5937                         goto next_pkt_nopost;
5938
5939                 work_mask |= opaque_key;
5940
5941                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5942                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5943                 drop_it:
5944                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5945                                        desc_idx, *post_ptr);
5946                 drop_it_no_recycle:
5947                         /* Other statistics kept track of by card. */
5948                         tp->rx_dropped++;
5949                         goto next_pkt;
5950                 }
5951
5952                 prefetch(data + TG3_RX_OFFSET(tp));
5953                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5954                       ETH_FCS_LEN;
5955
5956                 if (len > TG3_RX_COPY_THRESH(tp)) {
5957                         int skb_size;
5958                         unsigned int frag_size;
5959
5960                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5961                                                     *post_ptr, &frag_size);
5962                         if (skb_size < 0)
5963                                 goto drop_it;
5964
5965                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5966                                          PCI_DMA_FROMDEVICE);
5967
5968                         skb = build_skb(data, frag_size);
5969                         if (!skb) {
5970                                 tg3_frag_free(frag_size != 0, data);
5971                                 goto drop_it_no_recycle;
5972                         }
5973                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5974                         /* Ensure that the update to the data happens
5975                          * after the usage of the old DMA mapping.
5976                          */
5977                         smp_wmb();
5978
5979                         ri->data = NULL;
5980
5981                 } else {
5982                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5983                                        desc_idx, *post_ptr);
5984
5985                         skb = netdev_alloc_skb(tp->dev,
5986                                                len + TG3_RAW_IP_ALIGN);
5987                         if (skb == NULL)
5988                                 goto drop_it_no_recycle;
5989
5990                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
5991                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5992                         memcpy(skb->data,
5993                                data + TG3_RX_OFFSET(tp),
5994                                len);
5995                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5996                 }
5997
5998                 skb_put(skb, len);
5999                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6000                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6001                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6002                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6003                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6004                 else
6005                         skb_checksum_none_assert(skb);
6006
6007                 skb->protocol = eth_type_trans(skb, tp->dev);
6008
6009                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6010                     skb->protocol != htons(ETH_P_8021Q)) {
6011                         dev_kfree_skb(skb);
6012                         goto drop_it_no_recycle;
6013                 }
6014
6015                 if (desc->type_flags & RXD_FLAG_VLAN &&
6016                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6017                         __vlan_hwaccel_put_tag(skb,
6018                                                desc->err_vlan & RXD_VLAN_MASK);
6019
6020                 napi_gro_receive(&tnapi->napi, skb);
6021
6022                 received++;
6023                 budget--;
6024
6025 next_pkt:
6026                 (*post_ptr)++;
6027
6028                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6029                         tpr->rx_std_prod_idx = std_prod_idx &
6030                                                tp->rx_std_ring_mask;
6031                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6032                                      tpr->rx_std_prod_idx);
6033                         work_mask &= ~RXD_OPAQUE_RING_STD;
6034                         rx_std_posted = 0;
6035                 }
6036 next_pkt_nopost:
6037                 sw_idx++;
6038                 sw_idx &= tp->rx_ret_ring_mask;
6039
6040                 /* Refresh hw_idx to see if there is new work */
6041                 if (sw_idx == hw_idx) {
6042                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6043                         rmb();
6044                 }
6045         }
6046
6047         /* ACK the status ring. */
6048         tnapi->rx_rcb_ptr = sw_idx;
6049         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6050
6051         /* Refill RX ring(s). */
6052         if (!tg3_flag(tp, ENABLE_RSS)) {
6053                 /* Sync BD data before updating mailbox */
6054                 wmb();
6055
6056                 if (work_mask & RXD_OPAQUE_RING_STD) {
6057                         tpr->rx_std_prod_idx = std_prod_idx &
6058                                                tp->rx_std_ring_mask;
6059                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6060                                      tpr->rx_std_prod_idx);
6061                 }
6062                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6063                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6064                                                tp->rx_jmb_ring_mask;
6065                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6066                                      tpr->rx_jmb_prod_idx);
6067                 }
6068                 mmiowb();
6069         } else if (work_mask) {
6070                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6071                  * updated before the producer indices can be updated.
6072                  */
6073                 smp_wmb();
6074
6075                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6076                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6077
6078                 if (tnapi != &tp->napi[1]) {
6079                         tp->rx_refill = true;
6080                         napi_schedule(&tp->napi[1].napi);
6081                 }
6082         }
6083
6084         return received;
6085 }
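
/* The loop above is the usual producer/consumer pattern for a
 * chip-written status ring; a condensed sketch using names from this
 * file:
 *
 *	hw_idx = *(tnapi->rx_rcb_prod_idx);	// chip's producer index
 *	rmb();					// then read descriptors
 *	while (sw_idx != hw_idx && budget > 0)
 *		...consume one descriptor, post a fresh buffer...
 *	tw32_rx_mbox(tnapi->consmbox, sw_idx);	// ack what we consumed
 *
 * The rmb() matters because the chip DMA-writes the descriptor before
 * it updates the producer index; the host reads in the opposite order
 * and needs the barrier to avoid acting on a stale descriptor.
 */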
6086
6087 static void tg3_poll_link(struct tg3 *tp)
6088 {
6089         /* handle link change and other phy events */
6090         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6091                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6092
6093                 if (sblk->status & SD_STATUS_LINK_CHG) {
6094                         sblk->status = SD_STATUS_UPDATED |
6095                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6096                         spin_lock(&tp->lock);
6097                         if (tg3_flag(tp, USE_PHYLIB)) {
6098                                 tw32_f(MAC_STATUS,
6099                                      (MAC_STATUS_SYNC_CHANGED |
6100                                       MAC_STATUS_CFG_CHANGED |
6101                                       MAC_STATUS_MI_COMPLETION |
6102                                       MAC_STATUS_LNKSTATE_CHANGED));
6103                                 udelay(40);
6104                         } else
6105                                 tg3_setup_phy(tp, 0);
6106                         spin_unlock(&tp->lock);
6107                 }
6108         }
6109 }
6110
6111 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6112                                 struct tg3_rx_prodring_set *dpr,
6113                                 struct tg3_rx_prodring_set *spr)
6114 {
6115         u32 si, di, cpycnt, src_prod_idx;
6116         int i, err = 0;
6117
6118         while (1) {
6119                 src_prod_idx = spr->rx_std_prod_idx;
6120
6121                 /* Make sure updates to the rx_std_buffers[] entries and the
6122                  * standard producer index are seen in the correct order.
6123                  */
6124                 smp_rmb();
6125
6126                 if (spr->rx_std_cons_idx == src_prod_idx)
6127                         break;
6128
6129                 if (spr->rx_std_cons_idx < src_prod_idx)
6130                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6131                 else
6132                         cpycnt = tp->rx_std_ring_mask + 1 -
6133                                  spr->rx_std_cons_idx;
6134
6135                 cpycnt = min(cpycnt,
6136                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6137
6138                 si = spr->rx_std_cons_idx;
6139                 di = dpr->rx_std_prod_idx;
6140
6141                 for (i = di; i < di + cpycnt; i++) {
6142                         if (dpr->rx_std_buffers[i].data) {
6143                                 cpycnt = i - di;
6144                                 err = -ENOSPC;
6145                                 break;
6146                         }
6147                 }
6148
6149                 if (!cpycnt)
6150                         break;
6151
6152                 /* Ensure that updates to the rx_std_buffers ring and the
6153          * shadowed hardware producer ring from tg3_recycle_rx() are
6154                  * ordered correctly WRT the skb check above.
6155                  */
6156                 smp_rmb();
6157
6158                 memcpy(&dpr->rx_std_buffers[di],
6159                        &spr->rx_std_buffers[si],
6160                        cpycnt * sizeof(struct ring_info));
6161
6162                 for (i = 0; i < cpycnt; i++, di++, si++) {
6163                         struct tg3_rx_buffer_desc *sbd, *dbd;
6164                         sbd = &spr->rx_std[si];
6165                         dbd = &dpr->rx_std[di];
6166                         dbd->addr_hi = sbd->addr_hi;
6167                         dbd->addr_lo = sbd->addr_lo;
6168                 }
6169
6170                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6171                                        tp->rx_std_ring_mask;
6172                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6173                                        tp->rx_std_ring_mask;
6174         }
6175
6176         while (1) {
6177                 src_prod_idx = spr->rx_jmb_prod_idx;
6178
6179                 /* Make sure updates to the rx_jmb_buffers[] entries and
6180                  * the jumbo producer index are seen in the correct order.
6181                  */
6182                 smp_rmb();
6183
6184                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6185                         break;
6186
6187                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6188                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6189                 else
6190                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6191                                  spr->rx_jmb_cons_idx;
6192
6193                 cpycnt = min(cpycnt,
6194                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6195
6196                 si = spr->rx_jmb_cons_idx;
6197                 di = dpr->rx_jmb_prod_idx;
6198
6199                 for (i = di; i < di + cpycnt; i++) {
6200                         if (dpr->rx_jmb_buffers[i].data) {
6201                                 cpycnt = i - di;
6202                                 err = -ENOSPC;
6203                                 break;
6204                         }
6205                 }
6206
6207                 if (!cpycnt)
6208                         break;
6209
6210                 /* Ensure that updates to the rx_jmb_buffers ring and the
6211          * shadowed hardware producer ring from tg3_recycle_rx() are
6212                  * ordered correctly WRT the skb check above.
6213                  */
6214                 smp_rmb();
6215
6216                 memcpy(&dpr->rx_jmb_buffers[di],
6217                        &spr->rx_jmb_buffers[si],
6218                        cpycnt * sizeof(struct ring_info));
6219
6220                 for (i = 0; i < cpycnt; i++, di++, si++) {
6221                         struct tg3_rx_buffer_desc *sbd, *dbd;
6222                         sbd = &spr->rx_jmb[si].std;
6223                         dbd = &dpr->rx_jmb[di].std;
6224                         dbd->addr_hi = sbd->addr_hi;
6225                         dbd->addr_lo = sbd->addr_lo;
6226                 }
6227
6228                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6229                                        tp->rx_jmb_ring_mask;
6230                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6231                                        tp->rx_jmb_ring_mask;
6232         }
6233
6234         return err;
6235 }
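
/* A worked example of the copy-count computation above, assuming a
 * hypothetical 512-entry standard ring (mask 511): with
 * rx_std_cons_idx = 500 and rx_std_prod_idx = 20 the producer has
 * wrapped, so cpycnt = 511 + 1 - 500 = 12 entries are copied up to the
 * end of the ring, and the next loop iteration handles the remaining
 * 20 entries from index 0.  cpycnt is clamped the same way against the
 * destination producer index, so a single memcpy() never wraps either
 * ring.
 */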
6236
6237 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6238 {
6239         struct tg3 *tp = tnapi->tp;
6240
6241         /* run TX completion thread */
6242         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6243                 tg3_tx(tnapi);
6244                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6245                         return work_done;
6246         }
6247
6248         if (!tnapi->rx_rcb_prod_idx)
6249                 return work_done;
6250
6251         /* run RX thread, within the bounds set by NAPI.
6252          * All RX "locking" is done by ensuring outside
6253          * code synchronizes with tg3->napi.poll()
6254          */
6255         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6256                 work_done += tg3_rx(tnapi, budget - work_done);
6257
6258         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6259                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6260                 int i, err = 0;
6261                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6262                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6263
6264                 tp->rx_refill = false;
6265                 for (i = 1; i <= tp->rxq_cnt; i++)
6266                         err |= tg3_rx_prodring_xfer(tp, dpr,
6267                                                     &tp->napi[i].prodring);
6268
6269                 wmb();
6270
6271                 if (std_prod_idx != dpr->rx_std_prod_idx)
6272                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6273                                      dpr->rx_std_prod_idx);
6274
6275                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6276                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6277                                      dpr->rx_jmb_prod_idx);
6278
6279                 mmiowb();
6280
6281                 if (err)
6282                         tw32_f(HOSTCC_MODE, tp->coal_now);
6283         }
6284
6285         return work_done;
6286 }
6287
6288 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6289 {
6290         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6291                 schedule_work(&tp->reset_task);
6292 }
6293
6294 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6295 {
6296         cancel_work_sync(&tp->reset_task);
6297         tg3_flag_clear(tp, RESET_TASK_PENDING);
6298         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6299 }
6300
6301 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6302 {
6303         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6304         struct tg3 *tp = tnapi->tp;
6305         int work_done = 0;
6306         struct tg3_hw_status *sblk = tnapi->hw_status;
6307
6308         while (1) {
6309                 work_done = tg3_poll_work(tnapi, work_done, budget);
6310
6311                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6312                         goto tx_recovery;
6313
6314                 if (unlikely(work_done >= budget))
6315                         break;
6316
6317                 /* tnapi->last_tag is echoed back to the hw when interrupts
6318                  * are re-enabled below, to tell it how much work has been
6319                  * processed, so we must read it before checking for more work.
6320                  */
6321                 tnapi->last_tag = sblk->status_tag;
6322                 tnapi->last_irq_tag = tnapi->last_tag;
6323                 rmb();
6324
6325                 /* check for RX/TX work to do */
6326                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6327                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6328
6329                         /* This test here is not race-free, but it reduces
6330                          * the number of interrupts by looping again.
6331                          */
6332                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6333                                 continue;
6334
6335                         napi_complete(napi);
6336                         /* Reenable interrupts. */
6337                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6338
6339                         /* This test here is synchronized by napi_schedule()
6340                          * and napi_complete() to close the race condition.
6341                          */
6342                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6343                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6344                                                   HOSTCC_MODE_ENABLE |
6345                                                   tnapi->coal_now);
6346                         }
6347                         mmiowb();
6348                         break;
6349                 }
6350         }
6351
6352         return work_done;
6353
6354 tx_recovery:
6355         /* work_done is guaranteed to be less than budget. */
6356         napi_complete(napi);
6357         tg3_reset_task_schedule(tp);
6358         return work_done;
6359 }
6360
6361 static void tg3_process_error(struct tg3 *tp)
6362 {
6363         u32 val;
6364         bool real_error = false;
6365
6366         if (tg3_flag(tp, ERROR_PROCESSED))
6367                 return;
6368
6369         /* Check Flow Attention register */
6370         val = tr32(HOSTCC_FLOW_ATTN);
6371         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6372                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6373                 real_error = true;
6374         }
6375
6376         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6377                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6378                 real_error = true;
6379         }
6380
6381         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6382                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6383                 real_error = true;
6384         }
6385
6386         if (!real_error)
6387                 return;
6388
6389         tg3_dump_state(tp);
6390
6391         tg3_flag_set(tp, ERROR_PROCESSED);
6392         tg3_reset_task_schedule(tp);
6393 }
6394
6395 static int tg3_poll(struct napi_struct *napi, int budget)
6396 {
6397         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6398         struct tg3 *tp = tnapi->tp;
6399         int work_done = 0;
6400         struct tg3_hw_status *sblk = tnapi->hw_status;
6401
6402         while (1) {
6403                 if (sblk->status & SD_STATUS_ERROR)
6404                         tg3_process_error(tp);
6405
6406                 tg3_poll_link(tp);
6407
6408                 work_done = tg3_poll_work(tnapi, work_done, budget);
6409
6410                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6411                         goto tx_recovery;
6412
6413                 if (unlikely(work_done >= budget))
6414                         break;
6415
6416                 if (tg3_flag(tp, TAGGED_STATUS)) {
6417                         /* tp->last_tag is used in tg3_int_reenable() below
6418                          * to tell the hw how much work has been processed,
6419                          * so we must read it before checking for more work.
6420                          */
6421                         tnapi->last_tag = sblk->status_tag;
6422                         tnapi->last_irq_tag = tnapi->last_tag;
6423                         rmb();
6424                 } else
6425                         sblk->status &= ~SD_STATUS_UPDATED;
6426
6427                 if (likely(!tg3_has_work(tnapi))) {
6428                         napi_complete(napi);
6429                         tg3_int_reenable(tnapi);
6430                         break;
6431                 }
6432         }
6433
6434         return work_done;
6435
6436 tx_recovery:
6437         /* work_done is guaranteed to be less than budget. */
6438         napi_complete(napi);
6439         tg3_reset_task_schedule(tp);
6440         return work_done;
6441 }
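
/* Both poll routines above follow the standard NAPI contract: return
 * work_done == budget to stay scheduled, or call napi_complete() and
 * re-enable chip interrupts before returning less than budget.  In the
 * tagged-status mode the last status tag is echoed back through the
 * interrupt mailbox so the hardware knows how far the host has
 * processed.
 */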
6442
6443 static void tg3_napi_disable(struct tg3 *tp)
6444 {
6445         int i;
6446
6447         for (i = tp->irq_cnt - 1; i >= 0; i--)
6448                 napi_disable(&tp->napi[i].napi);
6449 }
6450
6451 static void tg3_napi_enable(struct tg3 *tp)
6452 {
6453         int i;
6454
6455         for (i = 0; i < tp->irq_cnt; i++)
6456                 napi_enable(&tp->napi[i].napi);
6457 }
6458
6459 static void tg3_napi_init(struct tg3 *tp)
6460 {
6461         int i;
6462
6463         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6464         for (i = 1; i < tp->irq_cnt; i++)
6465                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6466 }
6467
6468 static void tg3_napi_fini(struct tg3 *tp)
6469 {
6470         int i;
6471
6472         for (i = 0; i < tp->irq_cnt; i++)
6473                 netif_napi_del(&tp->napi[i].napi);
6474 }
6475
6476 static inline void tg3_netif_stop(struct tg3 *tp)
6477 {
6478         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6479         tg3_napi_disable(tp);
6480         netif_tx_disable(tp->dev);
6481 }
6482
6483 static inline void tg3_netif_start(struct tg3 *tp)
6484 {
6485         /* NOTE: unconditional netif_tx_wake_all_queues is only
6486          * appropriate so long as all callers are assured to
6487          * have free tx slots (such as after tg3_init_hw)
6488          */
6489         netif_tx_wake_all_queues(tp->dev);
6490
6491         tg3_napi_enable(tp);
6492         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6493         tg3_enable_ints(tp);
6494 }
6495
6496 static void tg3_irq_quiesce(struct tg3 *tp)
6497 {
6498         int i;
6499
6500         BUG_ON(tp->irq_sync);
6501
6502         tp->irq_sync = 1;
6503         smp_mb();
6504
6505         for (i = 0; i < tp->irq_cnt; i++)
6506                 synchronize_irq(tp->napi[i].irq_vec);
6507 }
6508
6509 /* Fully shut down all tg3 driver activity elsewhere in the system.
6510  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
6511  * Most of the time this is not necessary, except when shutting
6512  * down the device.
6513  */
6514 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6515 {
6516         spin_lock_bh(&tp->lock);
6517         if (irq_sync)
6518                 tg3_irq_quiesce(tp);
6519 }
6520
6521 static inline void tg3_full_unlock(struct tg3 *tp)
6522 {
6523         spin_unlock_bh(&tp->lock);
6524 }
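
/* A minimal, hypothetical usage sketch of the two helpers above
 * (illustration only, not a function in this driver):
 *
 *	tg3_full_lock(tp, 1);	// also waits out in-flight IRQ handlers
 *	...reprogram the hardware...
 *	tg3_full_unlock(tp);
 *
 * Passing irq_sync == 0 skips tg3_irq_quiesce() and only takes the
 * spinlock, which suffices for state the IRQ handlers never touch.
 */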
6525
6526 /* One-shot MSI handler - the chip automatically disables the interrupt
6527  * after sending the MSI, so the driver doesn't have to do it.
6528  */
6529 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6530 {
6531         struct tg3_napi *tnapi = dev_id;
6532         struct tg3 *tp = tnapi->tp;
6533
6534         prefetch(tnapi->hw_status);
6535         if (tnapi->rx_rcb)
6536                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6537
6538         if (likely(!tg3_irq_sync(tp)))
6539                 napi_schedule(&tnapi->napi);
6540
6541         return IRQ_HANDLED;
6542 }
6543
6544 /* MSI ISR - No need to check for interrupt sharing and no need to
6545  * flush status block and interrupt mailbox. PCI ordering rules
6546  * guarantee that MSI will arrive after the status block.
6547  */
6548 static irqreturn_t tg3_msi(int irq, void *dev_id)
6549 {
6550         struct tg3_napi *tnapi = dev_id;
6551         struct tg3 *tp = tnapi->tp;
6552
6553         prefetch(tnapi->hw_status);
6554         if (tnapi->rx_rcb)
6555                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6556         /*
6557          * Writing any value to intr-mbox-0 clears PCI INTA# and
6558          * chip-internal interrupt pending events.
6559          * Writing non-zero to intr-mbox-0 additionally tells the
6560          * NIC to stop sending us irqs, engaging "in-intr-handler"
6561          * event coalescing.
6562          */
6563         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6564         if (likely(!tg3_irq_sync(tp)))
6565                 napi_schedule(&tnapi->napi);
6566
6567         return IRQ_RETVAL(1);
6568 }
6569
6570 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6571 {
6572         struct tg3_napi *tnapi = dev_id;
6573         struct tg3 *tp = tnapi->tp;
6574         struct tg3_hw_status *sblk = tnapi->hw_status;
6575         unsigned int handled = 1;
6576
6577         /* In INTx mode, it is possible for the interrupt to arrive at
6578          * the CPU before the status block posted prior to it is visible
6579          * in host memory.  Reading the PCI State register will confirm
6580          * whether the interrupt is ours and will flush the status block.
6581          */
6582         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6583                 if (tg3_flag(tp, CHIP_RESETTING) ||
6584                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6585                         handled = 0;
6586                         goto out;
6587                 }
6588         }
6589
6590         /*
6591          * Writing any value to intr-mbox-0 clears PCI INTA# and
6592          * chip-internal interrupt pending events.
6593          * Writing non-zero to intr-mbox-0 additionally tells the
6594          * NIC to stop sending us irqs, engaging "in-intr-handler"
6595          * event coalescing.
6596          *
6597          * Flush the mailbox to de-assert the IRQ immediately to prevent
6598          * spurious interrupts.  The flush impacts performance but
6599          * excessive spurious interrupts can be worse in some cases.
6600          */
6601         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6602         if (tg3_irq_sync(tp))
6603                 goto out;
6604         sblk->status &= ~SD_STATUS_UPDATED;
6605         if (likely(tg3_has_work(tnapi))) {
6606                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6607                 napi_schedule(&tnapi->napi);
6608         } else {
6609                 /* No work, shared interrupt perhaps?  re-enable
6610                  * interrupts, and flush that PCI write
6611                  */
6612                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6613                                0x00000000);
6614         }
6615 out:
6616         return IRQ_RETVAL(handled);
6617 }
6618
6619 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6620 {
6621         struct tg3_napi *tnapi = dev_id;
6622         struct tg3 *tp = tnapi->tp;
6623         struct tg3_hw_status *sblk = tnapi->hw_status;
6624         unsigned int handled = 1;
6625
6626         /* In INTx mode, it is possible for the interrupt to arrive at
6627          * the CPU before the status block posted prior to it is visible
6628          * in host memory.  Reading the PCI State register will confirm
6629          * whether the interrupt is ours and will flush the status block.
6630          */
6631         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6632                 if (tg3_flag(tp, CHIP_RESETTING) ||
6633                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6634                         handled = 0;
6635                         goto out;
6636                 }
6637         }
6638
6639         /*
6640          * Writing any value to intr-mbox-0 clears PCI INTA# and
6641          * chip-internal interrupt pending events.
6642          * Writing non-zero to intr-mbox-0 additionally tells the
6643          * NIC to stop sending us irqs, engaging "in-intr-handler"
6644          * event coalescing.
6645          *
6646          * Flush the mailbox to de-assert the IRQ immediately to prevent
6647          * spurious interrupts.  The flush impacts performance but
6648          * excessive spurious interrupts can be worse in some cases.
6649          */
6650         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6651
6652         /*
6653          * In a shared interrupt configuration, sometimes other devices'
6654          * interrupts will scream.  We record the current status tag here
6655          * so that the above check can report that the screaming interrupts
6656          * are unhandled.  Eventually they will be silenced.
6657          */
6658         tnapi->last_irq_tag = sblk->status_tag;
6659
6660         if (tg3_irq_sync(tp))
6661                 goto out;
6662
6663         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6664
6665         napi_schedule(&tnapi->napi);
6666
6667 out:
6668         return IRQ_RETVAL(handled);
6669 }
6670
6671 /* ISR for interrupt test */
6672 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6673 {
6674         struct tg3_napi *tnapi = dev_id;
6675         struct tg3 *tp = tnapi->tp;
6676         struct tg3_hw_status *sblk = tnapi->hw_status;
6677
6678         if ((sblk->status & SD_STATUS_UPDATED) ||
6679             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6680                 tg3_disable_ints(tp);
6681                 return IRQ_RETVAL(1);
6682         }
6683         return IRQ_RETVAL(0);
6684 }
6685
6686 #ifdef CONFIG_NET_POLL_CONTROLLER
6687 static void tg3_poll_controller(struct net_device *dev)
6688 {
6689         int i;
6690         struct tg3 *tp = netdev_priv(dev);
6691
6692         for (i = 0; i < tp->irq_cnt; i++)
6693                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6694 }
6695 #endif
6696
6697 static void tg3_tx_timeout(struct net_device *dev)
6698 {
6699         struct tg3 *tp = netdev_priv(dev);
6700
6701         if (netif_msg_tx_err(tp)) {
6702                 netdev_err(dev, "transmit timed out, resetting\n");
6703                 tg3_dump_state(tp);
6704         }
6705
6706         tg3_reset_task_schedule(tp);
6707 }
6708
6709 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6710 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6711 {
6712         u32 base = (u32) mapping & 0xffffffff;
6713
6714         return (base > 0xffffdcc0) && (base + len + 8 < base);
6715 }
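
/* A worked example for the test above: with a hypothetical mapping of
 * 0xfffffff0 and len = 64, base + len + 8 wraps to 0x38 in 32-bit
 * arithmetic, which is less than base, so the buffer straddles a 4GB
 * boundary and the workaround path is taken.  The base > 0xffffdcc0
 * pre-check cheaply rejects bases low enough that no supported mapping
 * length (up to roughly 9KB for jumbo frames) could reach the
 * boundary.
 */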
6716
6717 /* Test for DMA addresses > 40-bit */
6718 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6719                                           int len)
6720 {
6721 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6722         if (tg3_flag(tp, 40BIT_DMA_BUG))
6723                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6724         return 0;
6725 #else
6726         return 0;
6727 #endif
6728 }
6729
6730 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6731                                  dma_addr_t mapping, u32 len, u32 flags,
6732                                  u32 mss, u32 vlan)
6733 {
6734         txbd->addr_hi = ((u64) mapping >> 32);
6735         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6736         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6737         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6738 }
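
/* A packing example for the descriptor writer above, with hypothetical
 * values len = 1514, flags = TXD_FLAG_END, mss = 0 and vlan = 5:
 *
 *	len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END;
 *	vlan_tag  = (0 << TXD_MSS_SHIFT) | (5 << TXD_VLAN_TAG_SHIFT);
 *
 * The length occupies the bits above TXD_LEN_SHIFT while the low 16
 * bits of len_flags carry the flags; MSS and VLAN tag share the second
 * word of the descriptor.
 */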
6739
6740 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6741                             dma_addr_t map, u32 len, u32 flags,
6742                             u32 mss, u32 vlan)
6743 {
6744         struct tg3 *tp = tnapi->tp;
6745         bool hwbug = false;
6746
6747         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6748                 hwbug = true;
6749
6750         if (tg3_4g_overflow_test(map, len))
6751                 hwbug = true;
6752
6753         if (tg3_40bit_overflow_test(tp, map, len))
6754                 hwbug = true;
6755
6756         if (tp->dma_limit) {
6757                 u32 prvidx = *entry;
6758                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6759                 while (len > tp->dma_limit && *budget) {
6760                         u32 frag_len = tp->dma_limit;
6761                         len -= tp->dma_limit;
6762
6763                         /* Avoid the 8byte DMA problem */
6764                         if (len <= 8) {
6765                                 len += tp->dma_limit / 2;
6766                                 frag_len = tp->dma_limit / 2;
6767                         }
6768
6769                         tnapi->tx_buffers[*entry].fragmented = true;
6770
6771                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6772                                       frag_len, tmp_flag, mss, vlan);
6773                         *budget -= 1;
6774                         prvidx = *entry;
6775                         *entry = NEXT_TX(*entry);
6776
6777                         map += frag_len;
6778                 }
6779
6780                 if (len) {
6781                         if (*budget) {
6782                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6783                                               len, flags, mss, vlan);
6784                                 *budget -= 1;
6785                                 *entry = NEXT_TX(*entry);
6786                         } else {
6787                                 hwbug = true;
6788                                 tnapi->tx_buffers[prvidx].fragmented = false;
6789                         }
6790                 }
6791         } else {
6792                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6793                               len, flags, mss, vlan);
6794                 *entry = NEXT_TX(*entry);
6795         }
6796
6797         return hwbug;
6798 }
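
/* A worked example of the dma_limit splitting above, assuming a
 * hypothetical dma_limit of 4096 and an 8200-byte mapping:
 *
 *	1st pass: frag_len = 4096, len becomes 4104
 *	2nd pass: len would drop to 8, tripping the 8-byte DMA bug, so
 *		  frag_len is halved to 2048 and len becomes 2056
 *	tail:	  a final BD of 2056 bytes carries the caller's flags
 *
 * Three BDs (4096 + 2048 + 2056 = 8200 bytes) replace the one
 * oversized mapping, and none ends with a tiny trailing length.
 */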
6799
6800 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6801 {
6802         int i;
6803         struct sk_buff *skb;
6804         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6805
6806         skb = txb->skb;
6807         txb->skb = NULL;
6808
6809         pci_unmap_single(tnapi->tp->pdev,
6810                          dma_unmap_addr(txb, mapping),
6811                          skb_headlen(skb),
6812                          PCI_DMA_TODEVICE);
6813
6814         while (txb->fragmented) {
6815                 txb->fragmented = false;
6816                 entry = NEXT_TX(entry);
6817                 txb = &tnapi->tx_buffers[entry];
6818         }
6819
6820         for (i = 0; i <= last; i++) {
6821                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6822
6823                 entry = NEXT_TX(entry);
6824                 txb = &tnapi->tx_buffers[entry];
6825
6826                 pci_unmap_page(tnapi->tp->pdev,
6827                                dma_unmap_addr(txb, mapping),
6828                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6829
6830                 while (txb->fragmented) {
6831                         txb->fragmented = false;
6832                         entry = NEXT_TX(entry);
6833                         txb = &tnapi->tx_buffers[entry];
6834                 }
6835         }
6836 }
6837
6838 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6839 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6840                                        struct sk_buff **pskb,
6841                                        u32 *entry, u32 *budget,
6842                                        u32 base_flags, u32 mss, u32 vlan)
6843 {
6844         struct tg3 *tp = tnapi->tp;
6845         struct sk_buff *new_skb, *skb = *pskb;
6846         dma_addr_t new_addr = 0;
6847         int ret = 0;
6848
6849         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6850                 new_skb = skb_copy(skb, GFP_ATOMIC);
6851         else {
6852                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6853
6854                 new_skb = skb_copy_expand(skb,
6855                                           skb_headroom(skb) + more_headroom,
6856                                           skb_tailroom(skb), GFP_ATOMIC);
6857         }
6858
6859         if (!new_skb) {
6860                 ret = -1;
6861         } else {
6862                 /* New SKB is guaranteed to be linear. */
6863                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6864                                           PCI_DMA_TODEVICE);
6865                 /* Make sure the mapping succeeded */
6866                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6867                         dev_kfree_skb(new_skb);
6868                         ret = -1;
6869                 } else {
6870                         u32 save_entry = *entry;
6871
6872                         base_flags |= TXD_FLAG_END;
6873
6874                         tnapi->tx_buffers[*entry].skb = new_skb;
6875                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6876                                            mapping, new_addr);
6877
6878                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6879                                             new_skb->len, base_flags,
6880                                             mss, vlan)) {
6881                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6882                                 dev_kfree_skb(new_skb);
6883                                 ret = -1;
6884                         }
6885                 }
6886         }
6887
6888         dev_kfree_skb(skb);
6889         *pskb = new_skb;
6890         return ret;
6891 }
6892
6893 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6894
6895 /* Use GSO to work around a rare TSO bug that may be triggered when the
6896  * TSO header is greater than 80 bytes.
6897  */
6898 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6899 {
6900         struct sk_buff *segs, *nskb;
6901         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6902
6903         /* Estimate the number of fragments in the worst case */
6904         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6905                 netif_stop_queue(tp->dev);
6906
6907                 /* netif_tx_stop_queue() must be done before checking
6908                  * tx index in tg3_tx_avail() below, because in
6909                  * tg3_tx(), we update tx index before checking for
6910                  * netif_tx_queue_stopped().
6911                  */
6912                 smp_mb();
6913                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6914                         return NETDEV_TX_BUSY;
6915
6916                 netif_wake_queue(tp->dev);
6917         }
6918
6919         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6920         if (IS_ERR(segs))
6921                 goto tg3_tso_bug_end;
6922
6923         do {
6924                 nskb = segs;
6925                 segs = segs->next;
6926                 nskb->next = NULL;
6927                 tg3_start_xmit(nskb, tp->dev);
6928         } while (segs);
6929
6930 tg3_tso_bug_end:
6931         dev_kfree_skb(skb);
6932
6933         return NETDEV_TX_OK;
6934 }
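
/* skb_gso_segment() above is passed the device features minus
 * NETIF_F_TSO, so the stack software-segments the packet into
 * gso_size-sized skbs that re-enter tg3_start_xmit() one at a time,
 * each with a header short enough to avoid the hardware TSO bug.
 */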
6935
6936 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6937  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6938  */
6939 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6940 {
6941         struct tg3 *tp = netdev_priv(dev);
6942         u32 len, entry, base_flags, mss, vlan = 0;
6943         u32 budget;
6944         int i = -1, would_hit_hwbug;
6945         dma_addr_t mapping;
6946         struct tg3_napi *tnapi;
6947         struct netdev_queue *txq;
6948         unsigned int last;
6949
6950         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6951         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6952         if (tg3_flag(tp, ENABLE_TSS))
6953                 tnapi++;
6954
6955         budget = tg3_tx_avail(tnapi);
6956
6957         /* We are running in BH disabled context with netif_tx_lock
6958          * and TX reclaim runs via tp->napi.poll inside of a software
6959          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6960          * no IRQ context deadlocks to worry about either.  Rejoice!
6961          */
6962         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6963                 if (!netif_tx_queue_stopped(txq)) {
6964                         netif_tx_stop_queue(txq);
6965
6966                         /* This is a hard error, log it. */
6967                         netdev_err(dev,
6968                                    "BUG! Tx Ring full when queue awake!\n");
6969                 }
6970                 return NETDEV_TX_BUSY;
6971         }
6972
6973         entry = tnapi->tx_prod;
6974         base_flags = 0;
6975         if (skb->ip_summed == CHECKSUM_PARTIAL)
6976                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6977
6978         mss = skb_shinfo(skb)->gso_size;
6979         if (mss) {
6980                 struct iphdr *iph;
6981                 u32 tcp_opt_len, hdr_len;
6982
6983                 if (skb_header_cloned(skb) &&
6984                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6985                         goto drop;
6986
6987                 iph = ip_hdr(skb);
6988                 tcp_opt_len = tcp_optlen(skb);
6989
6990                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6991
6992                 if (!skb_is_gso_v6(skb)) {
6993                         iph->check = 0;
6994                         iph->tot_len = htons(mss + hdr_len);
6995                 }
6996
6997                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6998                     tg3_flag(tp, TSO_BUG))
6999                         return tg3_tso_bug(tp, skb);
7000
7001                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7002                                TXD_FLAG_CPU_POST_DMA);
7003
7004                 if (tg3_flag(tp, HW_TSO_1) ||
7005                     tg3_flag(tp, HW_TSO_2) ||
7006                     tg3_flag(tp, HW_TSO_3)) {
7007                         tcp_hdr(skb)->check = 0;
7008                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7009                 } else
7010                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7011                                                                  iph->daddr, 0,
7012                                                                  IPPROTO_TCP,
7013                                                                  0);
7014
7015                 if (tg3_flag(tp, HW_TSO_3)) {
7016                         mss |= (hdr_len & 0xc) << 12;
7017                         if (hdr_len & 0x10)
7018                                 base_flags |= 0x00000010;
7019                         base_flags |= (hdr_len & 0x3e0) << 5;
7020                 } else if (tg3_flag(tp, HW_TSO_2))
7021                         mss |= hdr_len << 9;
7022                 else if (tg3_flag(tp, HW_TSO_1) ||
7023                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7024                         if (tcp_opt_len || iph->ihl > 5) {
7025                                 int tsflags;
7026
7027                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7028                                 mss |= (tsflags << 11);
7029                         }
7030                 } else {
7031                         if (tcp_opt_len || iph->ihl > 5) {
7032                                 int tsflags;
7033
7034                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7035                                 base_flags |= tsflags << 12;
7036                         }
7037                 }
7038         }
7039
7040         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7041             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7042                 base_flags |= TXD_FLAG_JMB_PKT;
7043
7044         if (vlan_tx_tag_present(skb)) {
7045                 base_flags |= TXD_FLAG_VLAN;
7046                 vlan = vlan_tx_tag_get(skb);
7047         }
7048
7049         len = skb_headlen(skb);
7050
7051         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7052         if (pci_dma_mapping_error(tp->pdev, mapping))
7053                 goto drop;
7054
7055
7056         tnapi->tx_buffers[entry].skb = skb;
7057         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7058
7059         would_hit_hwbug = 0;
7060
7061         if (tg3_flag(tp, 5701_DMA_BUG))
7062                 would_hit_hwbug = 1;
7063
7064         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7065                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7066                             mss, vlan)) {
7067                 would_hit_hwbug = 1;
7068         } else if (skb_shinfo(skb)->nr_frags > 0) {
7069                 u32 tmp_mss = mss;
7070
7071                 if (!tg3_flag(tp, HW_TSO_1) &&
7072                     !tg3_flag(tp, HW_TSO_2) &&
7073                     !tg3_flag(tp, HW_TSO_3))
7074                         tmp_mss = 0;
7075
7076                 /* Now loop through additional data
7077                  * fragments and queue them.
7078                  */
7079                 last = skb_shinfo(skb)->nr_frags - 1;
7080                 for (i = 0; i <= last; i++) {
7081                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7082
7083                         len = skb_frag_size(frag);
7084                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7085                                                    len, DMA_TO_DEVICE);
7086
7087                         tnapi->tx_buffers[entry].skb = NULL;
7088                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7089                                            mapping);
7090                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7091                                 goto dma_error;
7092
7093                         if (!budget ||
7094                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7095                                             len, base_flags |
7096                                             ((i == last) ? TXD_FLAG_END : 0),
7097                                             tmp_mss, vlan)) {
7098                                 would_hit_hwbug = 1;
7099                                 break;
7100                         }
7101                 }
7102         }
7103
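             /* One of the queued BDs tripped a DMA erratum.  Unwind the
              * mappings made so far and retry via the workaround, which
              * typically resends from a bounce copy of the skb (see
              * tigon3_dma_hwbug_workaround()).
              */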
7104         if (would_hit_hwbug) {
7105                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7106
7107                 /* If the workaround fails due to memory/mapping
7108                  * failure, silently drop this packet.
7109                  */
7110                 entry = tnapi->tx_prod;
7111                 budget = tg3_tx_avail(tnapi);
7112                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7113                                                 base_flags, mss, vlan))
7114                         goto drop_nofree;
7115         }
7116
7117         skb_tx_timestamp(skb);
7118         netdev_tx_sent_queue(txq, skb->len);
7119
7120         /* Sync BD data before updating mailbox */
7121         wmb();
7122
7123                 /* Packets are ready; update the Tx producer idx locally and on the card. */
7124         tw32_tx_mbox(tnapi->prodmbox, entry);
7125
7126         tnapi->tx_prod = entry;
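             /* Stop the queue once a maximally fragmented skb (head plus
              * MAX_SKB_FRAGS fragments, one BD each) might no longer fit.
              */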
7127         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7128                 netif_tx_stop_queue(txq);
7129
7130                 /* netif_tx_stop_queue() must be done before checking
7131                  * the tx index in tg3_tx_avail() below, because in
7132                  * tg3_tx(), we update tx index before checking for
7133                  * netif_tx_queue_stopped().
7134                  */
7135                 smp_mb();
7136                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7137                         netif_tx_wake_queue(txq);
7138         }
7139
7140         mmiowb();
7141         return NETDEV_TX_OK;
7142
7143 dma_error:
7144         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7145         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7146 drop:
7147         dev_kfree_skb(skb);
7148 drop_nofree:
7149         tp->tx_dropped++;
7150         return NETDEV_TX_OK;
7151 }
7152
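     /* Loop the MAC's transmit path back into its receiver.  Used by the
      * ethtool self-tests and by the NETIF_F_LOOPBACK handling below.
      */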
7153 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7154 {
7155         if (enable) {
7156                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7157                                   MAC_MODE_PORT_MODE_MASK);
7158
7159                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7160
7161                 if (!tg3_flag(tp, 5705_PLUS))
7162                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7163
7164                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7165                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7166                 else
7167                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7168         } else {
7169                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7170
7171                 if (tg3_flag(tp, 5705_PLUS) ||
7172                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7173                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7174                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7175         }
7176
7177         tw32(MAC_MODE, tp->mac_mode);
7178         udelay(40);
7179 }
7180
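     /* Put the PHY into loopback at a fixed speed and full duplex.  When
      * extlpbk is set, external (connector-side) loopback is configured
      * instead of internal PHY loopback.
      */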
7181 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7182 {
7183         u32 val, bmcr, mac_mode, ptest = 0;
7184
7185         tg3_phy_toggle_apd(tp, false);
7186         tg3_phy_toggle_automdix(tp, 0);
7187
7188         if (extlpbk && tg3_phy_set_extloopbk(tp))
7189                 return -EIO;
7190
7191         bmcr = BMCR_FULLDPLX;
7192         switch (speed) {
7193         case SPEED_10:
7194                 break;
7195         case SPEED_100:
7196                 bmcr |= BMCR_SPEED100;
7197                 break;
7198         case SPEED_1000:
7199         default:
7200                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7201                         speed = SPEED_100;
7202                         bmcr |= BMCR_SPEED100;
7203                 } else {
7204                         speed = SPEED_1000;
7205                         bmcr |= BMCR_SPEED1000;
7206                 }
7207         }
7208
7209         if (extlpbk) {
7210                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7211                         tg3_readphy(tp, MII_CTRL1000, &val);
7212                         val |= CTL1000_AS_MASTER |
7213                                CTL1000_ENABLE_MASTER;
7214                         tg3_writephy(tp, MII_CTRL1000, val);
7215                 } else {
7216                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7217                                 MII_TG3_FET_PTEST_TRIM_2;
7218                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7219                 }
7220         } else
7221                 bmcr |= BMCR_LOOPBACK;
7222
7223         tg3_writephy(tp, MII_BMCR, bmcr);
7224
7225         /* The write needs to be flushed for the FETs */
7226         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7227                 tg3_readphy(tp, MII_BMCR, &bmcr);
7228
7229         udelay(40);
7230
7231         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7232             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7233                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7234                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7235                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7236
7237                 /* The write needs to be flushed for the AC131 */
7238                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7239         }
7240
7241         /* Reset to prevent intermittently losing the 1st rx packet */
7242         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7243             tg3_flag(tp, 5780_CLASS)) {
7244                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7245                 udelay(10);
7246                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7247         }
7248
7249         mac_mode = tp->mac_mode &
7250                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7251         if (speed == SPEED_1000)
7252                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7253         else
7254                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7255
7256         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7257                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7258
7259                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7260                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7261                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7262                         mac_mode |= MAC_MODE_LINK_POLARITY;
7263
7264                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7265                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7266         }
7267
7268         tw32(MAC_MODE, mac_mode);
7269         udelay(40);
7270
7271         return 0;
7272 }
7273
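     /* Driven from the netdev features path; with standard ethtool the
      * NETIF_F_LOOPBACK bit is typically toggled with something like
      * "ethtool -K <dev> loopback on" (exact feature name assumed).
      */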
7274 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7275 {
7276         struct tg3 *tp = netdev_priv(dev);
7277
7278         if (features & NETIF_F_LOOPBACK) {
7279                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7280                         return;
7281
7282                 spin_lock_bh(&tp->lock);
7283                 tg3_mac_loopback(tp, true);
7284                 netif_carrier_on(tp->dev);
7285                 spin_unlock_bh(&tp->lock);
7286                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7287         } else {
7288                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7289                         return;
7290
7291                 spin_lock_bh(&tp->lock);
7292                 tg3_mac_loopback(tp, false);
7293                 /* Force link status check */
7294                 tg3_setup_phy(tp, 1);
7295                 spin_unlock_bh(&tp->lock);
7296                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7297         }
7298 }
7299
7300 static netdev_features_t tg3_fix_features(struct net_device *dev,
7301         netdev_features_t features)
7302 {
7303         struct tg3 *tp = netdev_priv(dev);
7304
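             /* 5780-class chips cannot do TSO on jumbo frames, so mask off
              * every TSO bit whenever the MTU exceeds the standard payload.
              */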
7305         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7306                 features &= ~NETIF_F_ALL_TSO;
7307
7308         return features;
7309 }
7310
7311 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7312 {
7313         netdev_features_t changed = dev->features ^ features;
7314
7315         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7316                 tg3_set_loopback(dev, features);
7317
7318         return 0;
7319 }
7320
7321 static void tg3_rx_prodring_free(struct tg3 *tp,
7322                                  struct tg3_rx_prodring_set *tpr)
7323 {
7324         int i;
7325
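             /* Only the vector-0 prodring is the real hardware ring and
              * owns every slot.  Per-vector RSS prodrings merely stage
              * buffers, so only their cons..prod window needs freeing.
              */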
7326         if (tpr != &tp->napi[0].prodring) {
7327                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7328                      i = (i + 1) & tp->rx_std_ring_mask)
7329                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7330                                         tp->rx_pkt_map_sz);
7331
7332                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7333                         for (i = tpr->rx_jmb_cons_idx;
7334                              i != tpr->rx_jmb_prod_idx;
7335                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7336                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7337                                                 TG3_RX_JMB_MAP_SZ);
7338                         }
7339                 }
7340
7341                 return;
7342         }
7343
7344         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7345                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7346                                 tp->rx_pkt_map_sz);
7347
7348         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7349                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7350                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7351                                         TG3_RX_JMB_MAP_SZ);
7352         }
7353 }
7354
7355 /* Initialize rx rings for packet processing.
7356  *
7357  * The chip has been shut down and the driver detached from
7358  * the networking stack, so no interrupts or new tx packets will
7359  * end up in the driver.  tp->{tx,}lock are held and thus
7360  * we may not sleep.
7361  */
7362 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7363                                  struct tg3_rx_prodring_set *tpr)
7364 {
7365         u32 i, rx_pkt_dma_sz;
7366
7367         tpr->rx_std_cons_idx = 0;
7368         tpr->rx_std_prod_idx = 0;
7369         tpr->rx_jmb_cons_idx = 0;
7370         tpr->rx_jmb_prod_idx = 0;
7371
7372         if (tpr != &tp->napi[0].prodring) {
7373                 memset(&tpr->rx_std_buffers[0], 0,
7374                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7375                 if (tpr->rx_jmb_buffers)
7376                         memset(&tpr->rx_jmb_buffers[0], 0,
7377                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7378                 goto done;
7379         }
7380
7381         /* Zero out all descriptors. */
7382         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7383
7384         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7385         if (tg3_flag(tp, 5780_CLASS) &&
7386             tp->dev->mtu > ETH_DATA_LEN)
7387                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7388         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7389
7390         /* Initialize invariants of the rings; we only set this
7391          * stuff once.  This works because the card does not
7392          * write into the rx buffer posting rings.
7393          */
7394         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7395                 struct tg3_rx_buffer_desc *rxd;
7396
7397                 rxd = &tpr->rx_std[i];
7398                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7399                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7400                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7401                                (i << RXD_OPAQUE_INDEX_SHIFT));
7402         }
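             /* The opaque cookie (ring type | buffer index) is echoed back
              * by the chip in rx return descriptors, letting the rx path
              * find the matching buffer without searching.
              */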
7403
7404         /* Now allocate fresh SKBs for each rx ring. */
7405         for (i = 0; i < tp->rx_pending; i++) {
7406                 unsigned int frag_size;
7407
7408                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7409                                       &frag_size) < 0) {
7410                         netdev_warn(tp->dev,
7411                                     "Using a smaller RX standard ring. Only "
7412                                     "%d out of %d buffers were allocated "
7413                                     "successfully\n", i, tp->rx_pending);
7414                         if (i == 0)
7415                                 goto initfail;
7416                         tp->rx_pending = i;
7417                         break;
7418                 }
7419         }
7420
7421         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7422                 goto done;
7423
7424         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7425
7426         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7427                 goto done;
7428
7429         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7430                 struct tg3_rx_buffer_desc *rxd;
7431
7432                 rxd = &tpr->rx_jmb[i].std;
7433                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7434                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7435                                   RXD_FLAG_JUMBO;
7436                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7437                        (i << RXD_OPAQUE_INDEX_SHIFT));
7438         }
7439
7440         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7441                 unsigned int frag_size;
7442
7443                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7444                                       &frag_size) < 0) {
7445                         netdev_warn(tp->dev,
7446                                     "Using a smaller RX jumbo ring. Only %d "
7447                                     "out of %d buffers were allocated "
7448                                     "successfully\n", i, tp->rx_jumbo_pending);
7449                         if (i == 0)
7450                                 goto initfail;
7451                         tp->rx_jumbo_pending = i;
7452                         break;
7453                 }
7454         }
7455
7456 done:
7457         return 0;
7458
7459 initfail:
7460         tg3_rx_prodring_free(tp, tpr);
7461         return -ENOMEM;
7462 }
7463
7464 static void tg3_rx_prodring_fini(struct tg3 *tp,
7465                                  struct tg3_rx_prodring_set *tpr)
7466 {
7467         kfree(tpr->rx_std_buffers);
7468         tpr->rx_std_buffers = NULL;
7469         kfree(tpr->rx_jmb_buffers);
7470         tpr->rx_jmb_buffers = NULL;
7471         if (tpr->rx_std) {
7472                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7473                                   tpr->rx_std, tpr->rx_std_mapping);
7474                 tpr->rx_std = NULL;
7475         }
7476         if (tpr->rx_jmb) {
7477                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7478                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7479                 tpr->rx_jmb = NULL;
7480         }
7481 }
7482
7483 static int tg3_rx_prodring_init(struct tg3 *tp,
7484                                 struct tg3_rx_prodring_set *tpr)
7485 {
7486         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7487                                       GFP_KERNEL);
7488         if (!tpr->rx_std_buffers)
7489                 return -ENOMEM;
7490
7491         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7492                                          TG3_RX_STD_RING_BYTES(tp),
7493                                          &tpr->rx_std_mapping,
7494                                          GFP_KERNEL);
7495         if (!tpr->rx_std)
7496                 goto err_out;
7497
7498         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7499                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7500                                               GFP_KERNEL);
7501                 if (!tpr->rx_jmb_buffers)
7502                         goto err_out;
7503
7504                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7505                                                  TG3_RX_JMB_RING_BYTES(tp),
7506                                                  &tpr->rx_jmb_mapping,
7507                                                  GFP_KERNEL);
7508                 if (!tpr->rx_jmb)
7509                         goto err_out;
7510         }
7511
7512         return 0;
7513
7514 err_out:
7515         tg3_rx_prodring_fini(tp, tpr);
7516         return -ENOMEM;
7517 }
7518
7519 /* Free up pending packets in all rx/tx rings.
7520  *
7521  * The chip has been shut down and the driver detached from
7522  * the networking stack, so no interrupts or new tx packets will
7523  * end up in the driver.  tp->{tx,}lock is not held and we are not
7524  * in an interrupt context and thus may sleep.
7525  */
7526 static void tg3_free_rings(struct tg3 *tp)
7527 {
7528         int i, j;
7529
7530         for (j = 0; j < tp->irq_cnt; j++) {
7531                 struct tg3_napi *tnapi = &tp->napi[j];
7532
7533                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7534
7535                 if (!tnapi->tx_buffers)
7536                         continue;
7537
7538                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7539                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7540
7541                         if (!skb)
7542                                 continue;
7543
7544                         tg3_tx_skb_unmap(tnapi, i,
7545                                          skb_shinfo(skb)->nr_frags - 1);
7546
7547                         dev_kfree_skb_any(skb);
7548                 }
7549                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7550         }
7551 }
7552
7553 /* Initialize tx/rx rings for packet processing.
7554  *
7555  * The chip has been shut down and the driver detached from
7556  * the networking stack, so no interrupts or new tx packets will
7557  * end up in the driver.  tp->{tx,}lock are held and thus
7558  * we may not sleep.
7559  */
7560 static int tg3_init_rings(struct tg3 *tp)
7561 {
7562         int i;
7563
7564         /* Free up all the SKBs. */
7565         tg3_free_rings(tp);
7566
7567         for (i = 0; i < tp->irq_cnt; i++) {
7568                 struct tg3_napi *tnapi = &tp->napi[i];
7569
7570                 tnapi->last_tag = 0;
7571                 tnapi->last_irq_tag = 0;
7572                 tnapi->hw_status->status = 0;
7573                 tnapi->hw_status->status_tag = 0;
7574                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7575
7576                 tnapi->tx_prod = 0;
7577                 tnapi->tx_cons = 0;
7578                 if (tnapi->tx_ring)
7579                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7580
7581                 tnapi->rx_rcb_ptr = 0;
7582                 if (tnapi->rx_rcb)
7583                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7584
7585                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7586                         tg3_free_rings(tp);
7587                         return -ENOMEM;
7588                 }
7589         }
7590
7591         return 0;
7592 }
7593
7594 static void tg3_mem_tx_release(struct tg3 *tp)
7595 {
7596         int i;
7597
7598         for (i = 0; i < tp->irq_max; i++) {
7599                 struct tg3_napi *tnapi = &tp->napi[i];
7600
7601                 if (tnapi->tx_ring) {
7602                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7603                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7604                         tnapi->tx_ring = NULL;
7605                 }
7606
7607                 kfree(tnapi->tx_buffers);
7608                 tnapi->tx_buffers = NULL;
7609         }
7610 }
7611
7612 static int tg3_mem_tx_acquire(struct tg3 *tp)
7613 {
7614         int i;
7615         struct tg3_napi *tnapi = &tp->napi[0];
7616
7617         /* If multivector TSS is enabled, vector 0 does not handle
7618          * tx interrupts.  Don't allocate any resources for it.
7619          */
7620         if (tg3_flag(tp, ENABLE_TSS))
7621                 tnapi++;
7622
7623         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7624                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7625                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7626                 if (!tnapi->tx_buffers)
7627                         goto err_out;
7628
7629                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7630                                                     TG3_TX_RING_BYTES,
7631                                                     &tnapi->tx_desc_mapping,
7632                                                     GFP_KERNEL);
7633                 if (!tnapi->tx_ring)
7634                         goto err_out;
7635         }
7636
7637         return 0;
7638
7639 err_out:
7640         tg3_mem_tx_release(tp);
7641         return -ENOMEM;
7642 }
7643
7644 static void tg3_mem_rx_release(struct tg3 *tp)
7645 {
7646         int i;
7647
7648         for (i = 0; i < tp->irq_max; i++) {
7649                 struct tg3_napi *tnapi = &tp->napi[i];
7650
7651                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7652
7653                 if (!tnapi->rx_rcb)
7654                         continue;
7655
7656                 dma_free_coherent(&tp->pdev->dev,
7657                                   TG3_RX_RCB_RING_BYTES(tp),
7658                                   tnapi->rx_rcb,
7659                                   tnapi->rx_rcb_mapping);
7660                 tnapi->rx_rcb = NULL;
7661         }
7662 }
7663
7664 static int tg3_mem_rx_acquire(struct tg3 *tp)
7665 {
7666         unsigned int i, limit;
7667
7668         limit = tp->rxq_cnt;
7669
7670         /* If RSS is enabled, we need a (dummy) producer ring
7671          * set on vector zero.  This is the true hw prodring.
7672          */
7673         if (tg3_flag(tp, ENABLE_RSS))
7674                 limit++;
7675
7676         for (i = 0; i < limit; i++) {
7677                 struct tg3_napi *tnapi = &tp->napi[i];
7678
7679                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7680                         goto err_out;
7681
7682                 /* If multivector RSS is enabled, vector 0
7683                  * does not handle rx or tx interrupts.
7684                  * Don't allocate any resources for it.
7685                  */
7686                 if (!i && tg3_flag(tp, ENABLE_RSS))
7687                         continue;
7688
7689                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7690                                                    TG3_RX_RCB_RING_BYTES(tp),
7691                                                    &tnapi->rx_rcb_mapping,
7692                                                    GFP_KERNEL);
7693                 if (!tnapi->rx_rcb)
7694                         goto err_out;
7695
7696                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7697         }
7698
7699         return 0;
7700
7701 err_out:
7702         tg3_mem_rx_release(tp);
7703         return -ENOMEM;
7704 }
7705
7706 /*
7707  * Must not be invoked with interrupt sources disabled and
7708  * the hardware shut down.
7709  */
7710 static void tg3_free_consistent(struct tg3 *tp)
7711 {
7712         int i;
7713
7714         for (i = 0; i < tp->irq_cnt; i++) {
7715                 struct tg3_napi *tnapi = &tp->napi[i];
7716
7717                 if (tnapi->hw_status) {
7718                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7719                                           tnapi->hw_status,
7720                                           tnapi->status_mapping);
7721                         tnapi->hw_status = NULL;
7722                 }
7723         }
7724
7725         tg3_mem_rx_release(tp);
7726         tg3_mem_tx_release(tp);
7727
7728         if (tp->hw_stats) {
7729                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7730                                   tp->hw_stats, tp->stats_mapping);
7731                 tp->hw_stats = NULL;
7732         }
7733 }
7734
7735 /*
7736  * Must not be invoked with interrupt sources disabled and
7737  * the hardware shut down.  Can sleep.
7738  */
7739 static int tg3_alloc_consistent(struct tg3 *tp)
7740 {
7741         int i;
7742
7743         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7744                                           sizeof(struct tg3_hw_stats),
7745                                           &tp->stats_mapping,
7746                                           GFP_KERNEL);
7747         if (!tp->hw_stats)
7748                 goto err_out;
7749
7750         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7751
7752         for (i = 0; i < tp->irq_cnt; i++) {
7753                 struct tg3_napi *tnapi = &tp->napi[i];
7754                 struct tg3_hw_status *sblk;
7755
7756                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7757                                                       TG3_HW_STATUS_SIZE,
7758                                                       &tnapi->status_mapping,
7759                                                       GFP_KERNEL);
7760                 if (!tnapi->hw_status)
7761                         goto err_out;
7762
7763                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7764                 sblk = tnapi->hw_status;
7765
7766                 if (tg3_flag(tp, ENABLE_RSS)) {
7767                         u16 *prodptr = NULL;
7768
7769                         /*
7770                          * When RSS is enabled, the status block format changes
7771                          * slightly.  The "rx_jumbo_consumer", "reserved",
7772                          * and "rx_mini_consumer" members get mapped to the
7773                          * other three rx return ring producer indexes.
7774                          */
7775                         switch (i) {
7776                         case 1:
7777                                 prodptr = &sblk->idx[0].rx_producer;
7778                                 break;
7779                         case 2:
7780                                 prodptr = &sblk->rx_jumbo_consumer;
7781                                 break;
7782                         case 3:
7783                                 prodptr = &sblk->reserved;
7784                                 break;
7785                         case 4:
7786                                 prodptr = &sblk->rx_mini_consumer;
7787                                 break;
7788                         }
7789                         tnapi->rx_rcb_prod_idx = prodptr;
7790                 } else {
7791                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7792                 }
7793         }
7794
7795         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
7796                 goto err_out;
7797
7798         return 0;
7799
7800 err_out:
7801         tg3_free_consistent(tp);
7802         return -ENOMEM;
7803 }
7804
7805 #define MAX_WAIT_CNT 1000
7806
7807 /* To stop a block, clear the enable bit and poll until it
7808  * clears.  tp->lock is held.
7809  */
7810 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7811 {
7812         unsigned int i;
7813         u32 val;
7814
7815         if (tg3_flag(tp, 5705_PLUS)) {
7816                 switch (ofs) {
7817                 case RCVLSC_MODE:
7818                 case DMAC_MODE:
7819                 case MBFREE_MODE:
7820                 case BUFMGR_MODE:
7821                 case MEMARB_MODE:
7822                         /* We can't enable/disable these bits of the
7823                          * 5705/5750, so just report success.
7824                          */
7825                         return 0;
7826
7827                 default:
7828                         break;
7829                 }
7830         }
7831
7832         val = tr32(ofs);
7833         val &= ~enable_bit;
7834         tw32_f(ofs, val);
7835
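             /* Poll up to MAX_WAIT_CNT * 100us = 100ms for the bit to clear. */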
7836         for (i = 0; i < MAX_WAIT_CNT; i++) {
7837                 udelay(100);
7838                 val = tr32(ofs);
7839                 if ((val & enable_bit) == 0)
7840                         break;
7841         }
7842
7843         if (i == MAX_WAIT_CNT && !silent) {
7844                 dev_err(&tp->pdev->dev,
7845                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7846                         ofs, enable_bit);
7847                 return -ENODEV;
7848         }
7849
7850         return 0;
7851 }
7852
7853 /* tp->lock is held. */
7854 static int tg3_abort_hw(struct tg3 *tp, int silent)
7855 {
7856         int i, err;
7857
7858         tg3_disable_ints(tp);
7859
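             /* Quiesce in dependency order: stop rx ingress first, then the
              * receive and send engines, and only afterwards host
              * coalescing, the buffer manager and the memory arbiter.
              */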
7860         tp->rx_mode &= ~RX_MODE_ENABLE;
7861         tw32_f(MAC_RX_MODE, tp->rx_mode);
7862         udelay(10);
7863
7864         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7865         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7866         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7867         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7868         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7869         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7870
7871         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7872         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7873         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7874         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7875         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7876         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7877         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7878
7879         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7880         tw32_f(MAC_MODE, tp->mac_mode);
7881         udelay(40);
7882
7883         tp->tx_mode &= ~TX_MODE_ENABLE;
7884         tw32_f(MAC_TX_MODE, tp->tx_mode);
7885
7886         for (i = 0; i < MAX_WAIT_CNT; i++) {
7887                 udelay(100);
7888                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7889                         break;
7890         }
7891         if (i >= MAX_WAIT_CNT) {
7892                 dev_err(&tp->pdev->dev,
7893                         "%s timed out, TX_MODE_ENABLE will not clear "
7894                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7895                 err |= -ENODEV;
7896         }
7897
7898         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7899         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7900         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7901
7902         tw32(FTQ_RESET, 0xffffffff);
7903         tw32(FTQ_RESET, 0x00000000);
7904
7905         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7906         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7907
7908         for (i = 0; i < tp->irq_cnt; i++) {
7909                 struct tg3_napi *tnapi = &tp->napi[i];
7910                 if (tnapi->hw_status)
7911                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7912         }
7913
7914         return err;
7915 }
7916
7917 /* Save PCI command register before chip reset */
7918 static void tg3_save_pci_state(struct tg3 *tp)
7919 {
7920         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7921 }
7922
7923 /* Restore PCI state after chip reset */
7924 static void tg3_restore_pci_state(struct tg3 *tp)
7925 {
7926         u32 val;
7927
7928         /* Re-enable indirect register accesses. */
7929         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7930                                tp->misc_host_ctrl);
7931
7932         /* Set MAX PCI retry to zero. */
7933         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7934         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7935             tg3_flag(tp, PCIX_MODE))
7936                 val |= PCISTATE_RETRY_SAME_DMA;
7937         /* Allow reads and writes to the APE register and memory space. */
7938         if (tg3_flag(tp, ENABLE_APE))
7939                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7940                        PCISTATE_ALLOW_APE_SHMEM_WR |
7941                        PCISTATE_ALLOW_APE_PSPACE_WR;
7942         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7943
7944         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7945
7946         if (!tg3_flag(tp, PCI_EXPRESS)) {
7947                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7948                                       tp->pci_cacheline_sz);
7949                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7950                                       tp->pci_lat_timer);
7951         }
7952
7953         /* Make sure PCI-X relaxed ordering bit is clear. */
7954         if (tg3_flag(tp, PCIX_MODE)) {
7955                 u16 pcix_cmd;
7956
7957                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7958                                      &pcix_cmd);
7959                 pcix_cmd &= ~PCI_X_CMD_ERO;
7960                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7961                                       pcix_cmd);
7962         }
7963
7964         if (tg3_flag(tp, 5780_CLASS)) {
7965
7966                 /* Chip reset on 5780 will reset MSI enable bit,
7967                  * so we need to restore it.
7968                  */
7969                 if (tg3_flag(tp, USING_MSI)) {
7970                         u16 ctrl;
7971
7972                         pci_read_config_word(tp->pdev,
7973                                              tp->msi_cap + PCI_MSI_FLAGS,
7974                                              &ctrl);
7975                         pci_write_config_word(tp->pdev,
7976                                               tp->msi_cap + PCI_MSI_FLAGS,
7977                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7978                         val = tr32(MSGINT_MODE);
7979                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7980                 }
7981         }
7982 }
7983
7984 /* tp->lock is held. */
7985 static int tg3_chip_reset(struct tg3 *tp)
7986 {
7987         u32 val;
7988         void (*write_op)(struct tg3 *, u32, u32);
7989         int i, err;
7990
7991         tg3_nvram_lock(tp);
7992
7993         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7994
7995         /* No matching tg3_nvram_unlock() after this because
7996          * chip reset below will undo the nvram lock.
7997          */
7998         tp->nvram_lock_cnt = 0;
7999
8000         /* GRC_MISC_CFG core clock reset will clear the memory
8001          * enable bit in PCI register 4 and the MSI enable bit
8002          * on some chips, so we save relevant registers here.
8003          */
8004         tg3_save_pci_state(tp);
8005
8006         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8007             tg3_flag(tp, 5755_PLUS))
8008                 tw32(GRC_FASTBOOT_PC, 0);
8009
8010         /*
8011          * We must avoid the readl() that normally takes place.
8012          * It locks up machines, causes machine checks, and does
8013          * other fun things.  So we temporarily disable the 5701
8014          * hardware workaround while we do the reset.
8015          */
8016         write_op = tp->write32;
8017         if (write_op == tg3_write_flush_reg32)
8018                 tp->write32 = tg3_write32;
8019
8020         /* Prevent the irq handler from reading or writing PCI registers
8021          * during chip reset when the memory enable bit in the PCI command
8022          * register may be cleared.  The chip does not generate interrupt
8023          * at this time, but the irq handler may still be called due to irq
8024          * sharing or irqpoll.
8025          */
8026         tg3_flag_set(tp, CHIP_RESETTING);
8027         for (i = 0; i < tp->irq_cnt; i++) {
8028                 struct tg3_napi *tnapi = &tp->napi[i];
8029                 if (tnapi->hw_status) {
8030                         tnapi->hw_status->status = 0;
8031                         tnapi->hw_status->status_tag = 0;
8032                 }
8033                 tnapi->last_tag = 0;
8034                 tnapi->last_irq_tag = 0;
8035         }
8036         smp_mb();
8037
8038         for (i = 0; i < tp->irq_cnt; i++)
8039                 synchronize_irq(tp->napi[i].irq_vec);
8040
8041         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8042                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8043                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8044         }
8045
8046         /* do the reset */
8047         val = GRC_MISC_CFG_CORECLK_RESET;
8048
8049         if (tg3_flag(tp, PCI_EXPRESS)) {
8050                 /* Force PCIe 1.0a mode */
8051                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8052                     !tg3_flag(tp, 57765_PLUS) &&
8053                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8054                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8055                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8056
8057                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8058                         tw32(GRC_MISC_CFG, (1 << 29));
8059                         val |= (1 << 29);
8060                 }
8061         }
8062
8063         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8064                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8065                 tw32(GRC_VCPU_EXT_CTRL,
8066                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8067         }
8068
8069         /* Manage gphy power for all CPMU-absent PCIe devices. */
8070         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8071                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8072
8073         tw32(GRC_MISC_CFG, val);
8074
8075         /* restore 5701 hardware bug workaround write method */
8076         tp->write32 = write_op;
8077
8078         /* Unfortunately, we have to delay before the PCI read back.
8079          * Some 575X chips will not even respond to a PCI cfg access
8080          * when the reset command is given to the chip.
8081          *
8082          * How do these hardware designers expect things to work
8083          * properly if the PCI write is posted for a long period
8084          * of time?  It is always necessary to have some method by
8085          * which a register read-back can occur to push out the
8086          * posted write that performs the reset.
8087          *
8088          * For most tg3 variants the trick below has worked.
8089          * Ho hum...
8090          */
8091         udelay(120);
8092
8093         /* Flush PCI posted writes.  The normal MMIO registers
8094          * are inaccessible at this time so this is the only
8095          * way to do this reliably (actually, this is no longer
8096          * the case, see above).  I tried to use indirect
8097          * register read/write but this upset some 5701 variants.
8098          */
8099         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8100
8101         udelay(120);
8102
8103         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8104                 u16 val16;
8105
8106                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8107                         int j;
8108                         u32 cfg_val;
8109
8110                         /* Wait for link training to complete.  */
8111                         for (j = 0; j < 5000; j++)
8112                                 udelay(100);
8113
8114                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8115                         pci_write_config_dword(tp->pdev, 0xc4,
8116                                                cfg_val | (1 << 15));
8117                 }
8118
8119                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8120                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8121                 /*
8122                  * Older PCIe devices only support the 128-byte
8123                  * MPS setting.  Enforce the restriction.
8124                  */
8125                 if (!tg3_flag(tp, CPMU_PRESENT))
8126                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8127                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8128
8129                 /* Clear error status */
8130                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8131                                       PCI_EXP_DEVSTA_CED |
8132                                       PCI_EXP_DEVSTA_NFED |
8133                                       PCI_EXP_DEVSTA_FED |
8134                                       PCI_EXP_DEVSTA_URD);
8135         }
8136
8137         tg3_restore_pci_state(tp);
8138
8139         tg3_flag_clear(tp, CHIP_RESETTING);
8140         tg3_flag_clear(tp, ERROR_PROCESSED);
8141
8142         val = 0;
8143         if (tg3_flag(tp, 5780_CLASS))
8144                 val = tr32(MEMARB_MODE);
8145         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8146
8147         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8148                 tg3_stop_fw(tp);
8149                 tw32(0x5000, 0x400);
8150         }
8151
8152         tw32(GRC_MODE, tp->grc_mode);
8153
8154         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8155                 val = tr32(0xc4);
8156
8157                 tw32(0xc4, val | (1 << 15));
8158         }
8159
8160         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8161             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8162                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8163                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8164                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8165                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8166         }
8167
8168         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8169                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8170                 val = tp->mac_mode;
8171         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8172                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8173                 val = tp->mac_mode;
8174         } else
8175                 val = 0;
8176
8177         tw32_f(MAC_MODE, val);
8178         udelay(40);
8179
8180         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8181
8182         err = tg3_poll_fw(tp);
8183         if (err)
8184                 return err;
8185
8186         tg3_mdio_start(tp);
8187
8188         if (tg3_flag(tp, PCI_EXPRESS) &&
8189             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8190             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8191             !tg3_flag(tp, 57765_PLUS)) {
8192                 val = tr32(0x7c00);
8193
8194                 tw32(0x7c00, val | (1 << 25));
8195         }
8196
8197         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8198                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8199                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8200         }
8201
8202         /* Reprobe ASF enable state.  */
8203         tg3_flag_clear(tp, ENABLE_ASF);
8204         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8205         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8206         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8207                 u32 nic_cfg;
8208
8209                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8210                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8211                         tg3_flag_set(tp, ENABLE_ASF);
8212                         tp->last_event_jiffies = jiffies;
8213                         if (tg3_flag(tp, 5750_PLUS))
8214                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8215                 }
8216         }
8217
8218         return 0;
8219 }
8220
8221 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8222 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8223
8224 /* tp->lock is held. */
8225 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8226 {
8227         int err;
8228
8229         tg3_stop_fw(tp);
8230
8231         tg3_write_sig_pre_reset(tp, kind);
8232
8233         tg3_abort_hw(tp, silent);
8234         err = tg3_chip_reset(tp);
8235
8236         __tg3_set_mac_addr(tp, 0);
8237
8238         tg3_write_sig_legacy(tp, kind);
8239         tg3_write_sig_post_reset(tp, kind);
8240
8241         if (tp->hw_stats) {
8242                 /* Save the stats across chip resets... */
8243                 tg3_get_nstats(tp, &tp->net_stats_prev);
8244                 tg3_get_estats(tp, &tp->estats_prev);
8245
8246                 /* And make sure the next sample is new data */
8247                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8248         }
8249
8250         if (err)
8251                 return err;
8252
8253         return 0;
8254 }
8255
8256 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8257 {
8258         struct tg3 *tp = netdev_priv(dev);
8259         struct sockaddr *addr = p;
8260         int err = 0, skip_mac_1 = 0;
8261
8262         if (!is_valid_ether_addr(addr->sa_data))
8263                 return -EADDRNOTAVAIL;
8264
8265         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8266
8267         if (!netif_running(dev))
8268                 return 0;
8269
8270         if (tg3_flag(tp, ENABLE_ASF)) {
8271                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8272
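                     /* ASF management firmware may claim MAC address slot 1
                      * for its own traffic; compare the two slots so a live
                      * slot 1 is left untouched.
                      */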
8273                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8274                 addr0_low = tr32(MAC_ADDR_0_LOW);
8275                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8276                 addr1_low = tr32(MAC_ADDR_1_LOW);
8277
8278                 /* Skip MAC addr 1 if ASF is using it. */
8279                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8280                     !(addr1_high == 0 && addr1_low == 0))
8281                         skip_mac_1 = 1;
8282         }
8283         spin_lock_bh(&tp->lock);
8284         __tg3_set_mac_addr(tp, skip_mac_1);
8285         spin_unlock_bh(&tp->lock);
8286
8287         return err;
8288 }
8289
8290 /* tp->lock is held. */
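     /* A bdinfo block in NIC SRAM holds the host ring's 64-bit DMA
      * address, a maxlen/flags word and, on pre-5705 chips only, the
      * ring's NIC-local address.
      */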
8291 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8292                            dma_addr_t mapping, u32 maxlen_flags,
8293                            u32 nic_addr)
8294 {
8295         tg3_write_mem(tp,
8296                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8297                       ((u64) mapping >> 32));
8298         tg3_write_mem(tp,
8299                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8300                       ((u64) mapping & 0xffffffff));
8301         tg3_write_mem(tp,
8302                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8303                        maxlen_flags);
8304
8305         if (!tg3_flag(tp, 5705_PLUS))
8306                 tg3_write_mem(tp,
8307                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8308                               nic_addr);
8309 }
8310
8311
8312 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8313 {
8314         int i = 0;
8315
8316         if (!tg3_flag(tp, ENABLE_TSS)) {
8317                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8318                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8319                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8320         } else {
8321                 tw32(HOSTCC_TXCOL_TICKS, 0);
8322                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8323                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8324
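                     /* Per-vector coalescing registers repeat at a
                      * 0x18-byte stride starting at the *_VEC1 offsets.
                      */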
8325                 for (; i < tp->txq_cnt; i++) {
8326                         u32 reg;
8327
8328                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8329                         tw32(reg, ec->tx_coalesce_usecs);
8330                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8331                         tw32(reg, ec->tx_max_coalesced_frames);
8332                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8333                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8334                 }
8335         }
8336
8337         for (; i < tp->irq_max - 1; i++) {
8338                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8339                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8340                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8341         }
8342 }
8343
8344 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8345 {
8346         int i = 0;
8347         u32 limit = tp->rxq_cnt;
8348
8349         if (!tg3_flag(tp, ENABLE_RSS)) {
8350                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8351                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8352                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8353                 limit--;
8354         } else {
8355                 tw32(HOSTCC_RXCOL_TICKS, 0);
8356                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8357                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8358         }
8359
8360         for (; i < limit; i++) {
8361                 u32 reg;
8362
8363                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8364                 tw32(reg, ec->rx_coalesce_usecs);
8365                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8366                 tw32(reg, ec->rx_max_coalesced_frames);
8367                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8368                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8369         }
8370
8371         for (; i < tp->irq_max - 1; i++) {
8372                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8373                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8374                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8375         }
8376 }
8377
8378 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8379 {
8380         tg3_coal_tx_init(tp, ec);
8381         tg3_coal_rx_init(tp, ec);
8382
8383         if (!tg3_flag(tp, 5705_PLUS)) {
8384                 u32 val = ec->stats_block_coalesce_usecs;
8385
8386                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8387                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8388
8389                 if (!netif_carrier_ok(tp->dev))
8390                         val = 0;
8391
8392                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8393         }
8394 }
8395
8396 /* tp->lock is held. */
8397 static void tg3_rings_reset(struct tg3 *tp)
8398 {
8399         int i;
8400         u32 stblk, txrcb, rxrcb, limit;
8401         struct tg3_napi *tnapi = &tp->napi[0];
8402
8403         /* Disable all transmit rings but the first. */
8404         if (!tg3_flag(tp, 5705_PLUS))
8405                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8406         else if (tg3_flag(tp, 5717_PLUS))
8407                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8408         else if (tg3_flag(tp, 57765_CLASS))
8409                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8410         else
8411                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8412
8413         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8414              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8415                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8416                               BDINFO_FLAGS_DISABLED);
8417
8418
8419         /* Disable all receive return rings but the first. */
8420         if (tg3_flag(tp, 5717_PLUS))
8421                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8422         else if (!tg3_flag(tp, 5705_PLUS))
8423                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8424         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8425                  tg3_flag(tp, 57765_CLASS))
8426                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8427         else
8428                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8429
8430         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8431              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8432                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8433                               BDINFO_FLAGS_DISABLED);
8434
8435         /* Disable interrupts */
8436         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8437         tp->napi[0].chk_msi_cnt = 0;
8438         tp->napi[0].last_rx_cons = 0;
8439         tp->napi[0].last_tx_cons = 0;
8440
8441         /* Zero mailbox registers. */
8442         if (tg3_flag(tp, SUPPORT_MSIX)) {
8443                 for (i = 1; i < tp->irq_max; i++) {
8444                         tp->napi[i].tx_prod = 0;
8445                         tp->napi[i].tx_cons = 0;
8446                         if (tg3_flag(tp, ENABLE_TSS))
8447                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8448                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8449                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8450                         tp->napi[i].chk_msi_cnt = 0;
8451                         tp->napi[i].last_rx_cons = 0;
8452                         tp->napi[i].last_tx_cons = 0;
8453                 }
8454                 if (!tg3_flag(tp, ENABLE_TSS))
8455                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8456         } else {
8457                 tp->napi[0].tx_prod = 0;
8458                 tp->napi[0].tx_cons = 0;
8459                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8460                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8461         }
8462
8463         /* Make sure the NIC-based send BD rings are disabled. */
8464         if (!tg3_flag(tp, 5705_PLUS)) {
8465                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8466                 for (i = 0; i < 16; i++)
8467                         tw32_tx_mbox(mbox + i * 8, 0);
8468         }
8469
8470         txrcb = NIC_SRAM_SEND_RCB;
8471         rxrcb = NIC_SRAM_RCV_RET_RCB;
8472
8473         /* Clear status block in ram. */
8474         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8475
8476         /* Set status block DMA address */
8477         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8478              ((u64) tnapi->status_mapping >> 32));
8479         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8480              ((u64) tnapi->status_mapping & 0xffffffff));
8481
8482         if (tnapi->tx_ring) {
8483                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8484                                (TG3_TX_RING_SIZE <<
8485                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8486                                NIC_SRAM_TX_BUFFER_DESC);
8487                 txrcb += TG3_BDINFO_SIZE;
8488         }
8489
8490         if (tnapi->rx_rcb) {
8491                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8492                                (tp->rx_ret_ring_mask + 1) <<
8493                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8494                 rxrcb += TG3_BDINFO_SIZE;
8495         }
8496
8497         stblk = HOSTCC_STATBLCK_RING1;
8498
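        /* Status block address registers for the additional MSI-X
         * vectors are consecutive 8-byte (high/low) pairs, hence the
         * stblk += 8 stride in the loop below.
         */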
8499         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8500                 u64 mapping = (u64)tnapi->status_mapping;
8501                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8502                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8503
8504                 /* Clear status block in ram. */
8505                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8506
8507                 if (tnapi->tx_ring) {
8508                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8509                                        (TG3_TX_RING_SIZE <<
8510                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8511                                        NIC_SRAM_TX_BUFFER_DESC);
8512                         txrcb += TG3_BDINFO_SIZE;
8513                 }
8514
8515                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8516                                ((tp->rx_ret_ring_mask + 1) <<
8517                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8518
8519                 stblk += 8;
8520                 rxrcb += TG3_BDINFO_SIZE;
8521         }
8522 }
8523
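/* Program the RX buffer descriptor replenish thresholds: the NIC
 * fetches more descriptors from the host ring once its on-chip BD
 * cache drains below these watermarks.  The cache size depends on
 * the ASIC generation.
 */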
8524 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8525 {
8526         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8527
8528         if (!tg3_flag(tp, 5750_PLUS) ||
8529             tg3_flag(tp, 5780_CLASS) ||
8530             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8531             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8532             tg3_flag(tp, 57765_PLUS))
8533                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8534         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8535                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8536                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8537         else
8538                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8539
8540         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8541         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8542
8543         val = min(nic_rep_thresh, host_rep_thresh);
8544         tw32(RCVBDI_STD_THRESH, val);
8545
8546         if (tg3_flag(tp, 57765_PLUS))
8547                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8548
8549         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8550                 return;
8551
8552         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8553
8554         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8555
8556         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8557         tw32(RCVBDI_JUMBO_THRESH, val);
8558
8559         if (tg3_flag(tp, 57765_PLUS))
8560                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8561 }
8562
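/* Bit-serial, bit-reflected CRC-32 (polynomial 0xedb88320), the same
 * CRC used for the Ethernet FCS.  __tg3_set_rx_mode() hashes each
 * multicast address with this to build the 128-bit hash filter.
 */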
8563 static inline u32 calc_crc(unsigned char *buf, int len)
8564 {
8565         u32 reg;
8566         u32 tmp;
8567         int j, k;
8568
8569         reg = 0xffffffff;
8570
8571         for (j = 0; j < len; j++) {
8572                 reg ^= buf[j];
8573
8574                 for (k = 0; k < 8; k++) {
8575                         tmp = reg & 0x01;
8576
8577                         reg >>= 1;
8578
8579                         if (tmp)
8580                                 reg ^= 0xedb88320;
8581                 }
8583
8584         return ~reg;
8585 }
8586
8587 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8588 {
8589         /* accept or reject all multicast frames */
8590         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8591         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8592         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8593         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8594 }
8595
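/* Resolve the RX filter policy: promiscuous mode accepts everything,
 * IFF_ALLMULTI opens the multicast hash filter completely, an empty
 * multicast list closes it, and otherwise the hash filter is built
 * from the CRC of each multicast address.
 */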
8596 static void __tg3_set_rx_mode(struct net_device *dev)
8597 {
8598         struct tg3 *tp = netdev_priv(dev);
8599         u32 rx_mode;
8600
8601         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8602                                   RX_MODE_KEEP_VLAN_TAG);
8603
8604 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8605         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8606          * flag clear.
8607          */
8608         if (!tg3_flag(tp, ENABLE_ASF))
8609                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8610 #endif
8611
8612         if (dev->flags & IFF_PROMISC) {
8613                 /* Promiscuous mode. */
8614                 rx_mode |= RX_MODE_PROMISC;
8615         } else if (dev->flags & IFF_ALLMULTI) {
8616                 /* Accept all multicast. */
8617                 tg3_set_multi(tp, 1);
8618         } else if (netdev_mc_empty(dev)) {
8619                 /* Reject all multicast. */
8620                 tg3_set_multi(tp, 0);
8621         } else {
8622                 /* Accept one or more multicast(s). */
8623                 struct netdev_hw_addr *ha;
8624                 u32 mc_filter[4] = { 0, };
8625                 u32 regidx;
8626                 u32 bit;
8627                 u32 crc;
8628
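                /* The low 7 bits of the complemented CRC select one
                 * of 128 hash bits: bits 6:5 pick one of the four
                 * 32-bit hash registers, bits 4:0 the bit within it.
                 */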
8629                 netdev_for_each_mc_addr(ha, dev) {
8630                         crc = calc_crc(ha->addr, ETH_ALEN);
8631                         bit = ~crc & 0x7f;
8632                         regidx = (bit & 0x60) >> 5;
8633                         bit &= 0x1f;
8634                         mc_filter[regidx] |= (1 << bit);
8635                 }
8636
8637                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8638                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8639                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8640                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8641         }
8642
8643         if (rx_mode != tp->rx_mode) {
8644                 tp->rx_mode = rx_mode;
8645                 tw32_f(MAC_RX_MODE, rx_mode);
8646                 udelay(10);
8647         }
8648 }
8649
8650 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8651 {
8652         int i;
8653
8654         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8655                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8656 }
8657
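/* Revalidate the RSS indirection table against the current vector
 * count: with two or fewer vectors RSS is moot, so the table is
 * cleared; if any entry points past the last RX queue, rebuild the
 * default round-robin table.
 */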
8658 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8659 {
8660         int i;
8661
8662         if (!tg3_flag(tp, SUPPORT_MSIX))
8663                 return;
8664
8665         if (tp->irq_cnt <= 2) {
8666                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8667                 return;
8668         }
8669
8670         /* Validate table against current IRQ count */
8671         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8672                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8673                         break;
8674         }
8675
8676         if (i != TG3_RSS_INDIR_TBL_SIZE)
8677                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8678 }
8679
8680 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8681 {
8682         int i = 0;
8683         u32 reg = MAC_RSS_INDIR_TBL_0;
8684
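        /* Each 32-bit indirection table register packs eight 4-bit
         * entries, first entry in the most significant nibble, so the
         * 128-entry table spans 16 consecutive registers.
         */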
8685         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8686                 u32 val = tp->rss_ind_tbl[i];
8687                 i++;
8688                 for (; i % 8; i++) {
8689                         val <<= 4;
8690                         val |= tp->rss_ind_tbl[i];
8691                 }
8692                 tw32(reg, val);
8693                 reg += 4;
8694         }
8695 }
8696
8697 /* tp->lock is held. */
8698 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8699 {
8700         u32 val, rdmac_mode;
8701         int i, err, limit;
8702         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8703
8704         tg3_disable_ints(tp);
8705
8706         tg3_stop_fw(tp);
8707
8708         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8709
8710         if (tg3_flag(tp, INIT_COMPLETE))
8711                 tg3_abort_hw(tp, 1);
8712
8713         /* Enable MAC control of LPI */
8714         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8715                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8716                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8717                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8718
8719                 tw32_f(TG3_CPMU_EEE_CTRL,
8720                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8721
8722                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8723                       TG3_CPMU_EEEMD_LPI_IN_TX |
8724                       TG3_CPMU_EEEMD_LPI_IN_RX |
8725                       TG3_CPMU_EEEMD_EEE_ENABLE;
8726
8727                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8728                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8729
8730                 if (tg3_flag(tp, ENABLE_APE))
8731                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8732
8733                 tw32_f(TG3_CPMU_EEE_MODE, val);
8734
8735                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8736                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8737                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8738
8739                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8740                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8741                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8742         }
8743
8744         if (reset_phy)
8745                 tg3_phy_reset(tp);
8746
8747         err = tg3_chip_reset(tp);
8748         if (err)
8749                 return err;
8750
8751         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8752
8753         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8754                 val = tr32(TG3_CPMU_CTRL);
8755                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8756                 tw32(TG3_CPMU_CTRL, val);
8757
8758                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8759                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8760                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8761                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8762
8763                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8764                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8765                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8766                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8767
8768                 val = tr32(TG3_CPMU_HST_ACC);
8769                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8770                 val |= CPMU_HST_ACC_MACCLK_6_25;
8771                 tw32(TG3_CPMU_HST_ACC, val);
8772         }
8773
8774         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8775                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8776                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8777                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8778                 tw32(PCIE_PWR_MGMT_THRESH, val);
8779
8780                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8781                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8782
8783                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8784
8785                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8786                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8787         }
8788
8789         if (tg3_flag(tp, L1PLLPD_EN)) {
8790                 u32 grc_mode = tr32(GRC_MODE);
8791
8792                 /* Access the lower 1K of PL PCIE block registers. */
8793                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8794                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8795
8796                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8797                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8798                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8799
8800                 tw32(GRC_MODE, grc_mode);
8801         }
8802
8803         if (tg3_flag(tp, 57765_CLASS)) {
8804                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8805                         u32 grc_mode = tr32(GRC_MODE);
8806
8807                         /* Access the lower 1K of PL PCIE block registers. */
8808                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8809                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8810
8811                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8812                                    TG3_PCIE_PL_LO_PHYCTL5);
8813                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8814                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8815
8816                         tw32(GRC_MODE, grc_mode);
8817                 }
8818
8819                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8820                         u32 grc_mode = tr32(GRC_MODE);
8821
8822                         /* Access the lower 1K of DL PCIE block registers. */
8823                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8824                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8825
8826                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8827                                    TG3_PCIE_DL_LO_FTSMAX);
8828                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8829                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8830                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8831
8832                         tw32(GRC_MODE, grc_mode);
8833                 }
8834
8835                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8836                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8837                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8838                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8839         }
8840
8841         /* This works around an issue with Athlon chipsets on
8842          * B3 tigon3 silicon.  This bit has no effect on any
8843          * other revision.  But do not set this on PCI Express
8844          * chips and don't even touch the clocks if the CPMU is present.
8845          */
8846         if (!tg3_flag(tp, CPMU_PRESENT)) {
8847                 if (!tg3_flag(tp, PCI_EXPRESS))
8848                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8849                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8850         }
8851
8852         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8853             tg3_flag(tp, PCIX_MODE)) {
8854                 val = tr32(TG3PCI_PCISTATE);
8855                 val |= PCISTATE_RETRY_SAME_DMA;
8856                 tw32(TG3PCI_PCISTATE, val);
8857         }
8858
8859         if (tg3_flag(tp, ENABLE_APE)) {
8860                 /* Allow reads and writes to the
8861                  * APE register and memory space.
8862                  */
8863                 val = tr32(TG3PCI_PCISTATE);
8864                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8865                        PCISTATE_ALLOW_APE_SHMEM_WR |
8866                        PCISTATE_ALLOW_APE_PSPACE_WR;
8867                 tw32(TG3PCI_PCISTATE, val);
8868         }
8869
8870         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8871                 /* Enable some hw fixes.  */
8872                 val = tr32(TG3PCI_MSI_DATA);
8873                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8874                 tw32(TG3PCI_MSI_DATA, val);
8875         }
8876
8877         /* Descriptor ring init may access the NIC SRAM area to
8878          * set up the TX descriptors, so we can only do this
8879          * after the hardware has been successfully reset.
8880          */
8882         err = tg3_init_rings(tp);
8883         if (err)
8884                 return err;
8885
8886         if (tg3_flag(tp, 57765_PLUS)) {
8887                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8888                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8889                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8890                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8891                 if (!tg3_flag(tp, 57765_CLASS) &&
8892                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8893                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8894                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8895         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8896                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8897                 /* This value is determined during the probe time DMA
8898                  * engine test, tg3_test_dma.
8899                  */
8900                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8901         }
8902
8903         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8904                           GRC_MODE_4X_NIC_SEND_RINGS |
8905                           GRC_MODE_NO_TX_PHDR_CSUM |
8906                           GRC_MODE_NO_RX_PHDR_CSUM);
8907         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8908
8909         /* The pseudo-header checksum is done by hardware logic
8910          * and not the offload processors, so make the chip do
8911          * the pseudo-header checksums on receive.  For transmit
8912          * it is more convenient to do the checksum in software,
8913          * as Linux already does that for us in all cases.
8914          */
8915         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8916
8917         tw32(GRC_MODE,
8918              tp->grc_mode |
8919              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8920
8921         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8922         val = tr32(GRC_MISC_CFG);
8923         val &= ~0xff;
8924         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8925         tw32(GRC_MISC_CFG, val);
8926
8927         /* Initialize MBUF/DESC pool. */
8928         if (tg3_flag(tp, 5750_PLUS)) {
8929                 /* Do nothing.  */
8930         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8931                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8932                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8933                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8934                 else
8935                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8936                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8937                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8938         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8939                 int fw_len;
8940
8941                 fw_len = tp->fw_len;
8942                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8943                 tw32(BUFMGR_MB_POOL_ADDR,
8944                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8945                 tw32(BUFMGR_MB_POOL_SIZE,
8946                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8947         }
8948
8949         if (tp->dev->mtu <= ETH_DATA_LEN) {
8950                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8951                      tp->bufmgr_config.mbuf_read_dma_low_water);
8952                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8953                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8954                 tw32(BUFMGR_MB_HIGH_WATER,
8955                      tp->bufmgr_config.mbuf_high_water);
8956         } else {
8957                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8958                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8959                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8960                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8961                 tw32(BUFMGR_MB_HIGH_WATER,
8962                      tp->bufmgr_config.mbuf_high_water_jumbo);
8963         }
8964         tw32(BUFMGR_DMA_LOW_WATER,
8965              tp->bufmgr_config.dma_low_water);
8966         tw32(BUFMGR_DMA_HIGH_WATER,
8967              tp->bufmgr_config.dma_high_water);
8968
8969         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8970         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8971                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8972         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8973             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8974             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8975                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8976         tw32(BUFMGR_MODE, val);
8977         for (i = 0; i < 2000; i++) {
8978                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8979                         break;
8980                 udelay(10);
8981         }
8982         if (i >= 2000) {
8983                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8984                 return -ENODEV;
8985         }
8986
8987         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8988                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8989
8990         tg3_setup_rxbd_thresholds(tp);
8991
8992         /* Initialize TG3_BDINFO's at:
8993          *  RCVDBDI_STD_BD:     standard eth size rx ring
8994          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8995          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8996          *
8997          * like so:
8998          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8999          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9000          *                              ring attribute flags
9001          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9002          *
9003          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9004          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9005          *
9006          * The size of each ring is fixed in the firmware, but the location is
9007          * configurable.
9008          */
9009         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9010              ((u64) tpr->rx_std_mapping >> 32));
9011         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9012              ((u64) tpr->rx_std_mapping & 0xffffffff));
9013         if (!tg3_flag(tp, 5717_PLUS))
9014                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9015                      NIC_SRAM_RX_BUFFER_DESC);
9016
9017         /* Disable the mini ring */
9018         if (!tg3_flag(tp, 5705_PLUS))
9019                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9020                      BDINFO_FLAGS_DISABLED);
9021
9022         /* Program the jumbo buffer descriptor ring control
9023          * blocks on those devices that have them.
9024          */
9025         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9026             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9027
9028                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9029                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9030                              ((u64) tpr->rx_jmb_mapping >> 32));
9031                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9032                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9033                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9034                               BDINFO_FLAGS_MAXLEN_SHIFT;
9035                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9036                              val | BDINFO_FLAGS_USE_EXT_RECV);
9037                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9038                             tg3_flag(tp, 57765_CLASS))
9039                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9040                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9041                 } else {
9042                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9043                              BDINFO_FLAGS_DISABLED);
9044                 }
9045
9046                 if (tg3_flag(tp, 57765_PLUS)) {
9047                         val = TG3_RX_STD_RING_SIZE(tp);
9048                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9049                         val |= (TG3_RX_STD_DMA_SZ << 2);
9050                 } else
9051                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9052         } else
9053                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9054
9055         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9056
9057         tpr->rx_std_prod_idx = tp->rx_pending;
9058         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9059
9060         tpr->rx_jmb_prod_idx =
9061                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9062         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9063
9064         tg3_rings_reset(tp);
9065
9066         /* Initialize MAC address and backoff seed. */
9067         __tg3_set_mac_addr(tp, 0);
9068
9069         /* MTU + Ethernet header + FCS + optional VLAN tag */
9070         tw32(MAC_RX_MTU_SIZE,
9071              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9072
9073         /* The slot time is changed by tg3_setup_phy if we
9074          * run at gigabit with half duplex.
9075          */
9076         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9077               (6 << TX_LENGTHS_IPG_SHIFT) |
9078               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9079
9080         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9081                 val |= tr32(MAC_TX_LENGTHS) &
9082                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9083                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9084
9085         tw32(MAC_TX_LENGTHS, val);
9086
9087         /* Receive rules. */
9088         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9089         tw32(RCVLPC_CONFIG, 0x0181);
9090
9091         /* Calculate RDMAC_MODE setting early, we need it to determine
9092          * the RCVLPC_STATE_ENABLE mask.
9093          */
9094         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9095                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9096                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9097                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9098                       RDMAC_MODE_LNGREAD_ENAB);
9099
9100         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9101                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9102
9103         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9104             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9105             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9106                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9107                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9108                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9109
9110         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9111             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9112                 if (tg3_flag(tp, TSO_CAPABLE) &&
9113                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9114                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9115                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9116                            !tg3_flag(tp, IS_5788)) {
9117                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9118                 }
9119         }
9120
9121         if (tg3_flag(tp, PCI_EXPRESS))
9122                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9123
9124         if (tg3_flag(tp, HW_TSO_1) ||
9125             tg3_flag(tp, HW_TSO_2) ||
9126             tg3_flag(tp, HW_TSO_3))
9127                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9128
9129         if (tg3_flag(tp, 57765_PLUS) ||
9130             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9131             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9132                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9133
9134         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9135                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9136
9137         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9138             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9139             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9140             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9141             tg3_flag(tp, 57765_PLUS)) {
9142                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9143                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
9144                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9145                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9146                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9147                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9148                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9149                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9150                 }
9151                 tw32(TG3_RDMA_RSRVCTRL_REG,
9152                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9153         }
9154
9155         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9156             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9157                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9158                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9159                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9160                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9161         }
9162
9163         /* Receive/send statistics. */
9164         if (tg3_flag(tp, 5750_PLUS)) {
9165                 val = tr32(RCVLPC_STATS_ENABLE);
9166                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9167                 tw32(RCVLPC_STATS_ENABLE, val);
9168         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9169                    tg3_flag(tp, TSO_CAPABLE)) {
9170                 val = tr32(RCVLPC_STATS_ENABLE);
9171                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9172                 tw32(RCVLPC_STATS_ENABLE, val);
9173         } else {
9174                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9175         }
9176         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9177         tw32(SNDDATAI_STATSENAB, 0xffffff);
9178         tw32(SNDDATAI_STATSCTRL,
9179              (SNDDATAI_SCTRL_ENABLE |
9180               SNDDATAI_SCTRL_FASTUPD));
9181
9182         /* Setup host coalescing engine. */
9183         tw32(HOSTCC_MODE, 0);
9184         for (i = 0; i < 2000; i++) {
9185                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9186                         break;
9187                 udelay(10);
9188         }
9189
9190         __tg3_set_coalesce(tp, &tp->coal);
9191
9192         if (!tg3_flag(tp, 5705_PLUS)) {
9193                 /* Status/statistics block address.  See tg3_timer,
9194                  * the tg3_periodic_fetch_stats call there, and
9195                  * tg3_get_stats to see how this works for 5705/5750 chips.
9196                  */
9197                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9198                      ((u64) tp->stats_mapping >> 32));
9199                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9200                      ((u64) tp->stats_mapping & 0xffffffff));
9201                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9202
9203                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9204
9205                 /* Clear statistics and status block memory areas */
9206                 for (i = NIC_SRAM_STATS_BLK;
9207                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9208                      i += sizeof(u32)) {
9209                         tg3_write_mem(tp, i, 0);
9210                         udelay(40);
9211                 }
9212         }
9213
9214         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9215
9216         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9217         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9218         if (!tg3_flag(tp, 5705_PLUS))
9219                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9220
9221         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9222                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9223                 /* Reset to avoid intermittently losing the first RX packet */
9224                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9225                 udelay(10);
9226         }
9227
9228         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9229                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9230                         MAC_MODE_FHDE_ENABLE;
9231         if (tg3_flag(tp, ENABLE_APE))
9232                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9233         if (!tg3_flag(tp, 5705_PLUS) &&
9234             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9235             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9236                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9237         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9238         udelay(40);
9239
9240         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9241          * If TG3_FLAG_IS_NIC is zero, we should read the
9242          * register to preserve the GPIO settings for LOMs. The GPIOs,
9243          * whether used as inputs or outputs, are set by boot code after
9244          * reset.
9245          */
9246         if (!tg3_flag(tp, IS_NIC)) {
9247                 u32 gpio_mask;
9248
9249                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9250                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9251                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9252
9253                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9254                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9255                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9256
9257                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9258                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9259
9260                 tp->grc_local_ctrl &= ~gpio_mask;
9261                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9262
9263                 /* GPIO1 must be driven high for eeprom write protect */
9264                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9265                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9266                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9267         }
9268         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9269         udelay(100);
9270
9271         if (tg3_flag(tp, USING_MSIX)) {
9272                 val = tr32(MSGINT_MODE);
9273                 val |= MSGINT_MODE_ENABLE;
9274                 if (tp->irq_cnt > 1)
9275                         val |= MSGINT_MODE_MULTIVEC_EN;
9276                 if (!tg3_flag(tp, 1SHOT_MSI))
9277                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9278                 tw32(MSGINT_MODE, val);
9279         }
9280
9281         if (!tg3_flag(tp, 5705_PLUS)) {
9282                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9283                 udelay(40);
9284         }
9285
9286         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9287                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9288                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9289                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9290                WDMAC_MODE_LNGREAD_ENAB);
9291
9292         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9293             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9294                 if (tg3_flag(tp, TSO_CAPABLE) &&
9295                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9296                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9297                         /* nothing */
9298                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9299                            !tg3_flag(tp, IS_5788)) {
9300                         val |= WDMAC_MODE_RX_ACCEL;
9301                 }
9302         }
9303
9304         /* Enable host coalescing bug fix */
9305         if (tg3_flag(tp, 5755_PLUS))
9306                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9307
9308         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9309                 val |= WDMAC_MODE_BURST_ALL_DATA;
9310
9311         tw32_f(WDMAC_MODE, val);
9312         udelay(40);
9313
9314         if (tg3_flag(tp, PCIX_MODE)) {
9315                 u16 pcix_cmd;
9316
9317                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9318                                      &pcix_cmd);
9319                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9320                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9321                         pcix_cmd |= PCI_X_CMD_READ_2K;
9322                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9323                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9324                         pcix_cmd |= PCI_X_CMD_READ_2K;
9325                 }
9326                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9327                                       pcix_cmd);
9328         }
9329
9330         tw32_f(RDMAC_MODE, rdmac_mode);
9331         udelay(40);
9332
9333         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9334                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9335                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9336                                 break;
9337                 }
9338                 if (i < TG3_NUM_RDMA_CHANNELS) {
9339                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9340                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9341                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9342                         tg3_flag_set(tp, 5719_RDMA_BUG);
9343                 }
9344         }
9345
9346         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9347         if (!tg3_flag(tp, 5705_PLUS))
9348                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9349
9350         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9351                 tw32(SNDDATAC_MODE,
9352                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9353         else
9354                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9355
9356         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9357         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9358         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9359         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9360                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9361         tw32(RCVDBDI_MODE, val);
9362         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9363         if (tg3_flag(tp, HW_TSO_1) ||
9364             tg3_flag(tp, HW_TSO_2) ||
9365             tg3_flag(tp, HW_TSO_3))
9366                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9367         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9368         if (tg3_flag(tp, ENABLE_TSS))
9369                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9370         tw32(SNDBDI_MODE, val);
9371         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9372
9373         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9374                 err = tg3_load_5701_a0_firmware_fix(tp);
9375                 if (err)
9376                         return err;
9377         }
9378
9379         if (tg3_flag(tp, TSO_CAPABLE)) {
9380                 err = tg3_load_tso_firmware(tp);
9381                 if (err)
9382                         return err;
9383         }
9384
9385         tp->tx_mode = TX_MODE_ENABLE;
9386
9387         if (tg3_flag(tp, 5755_PLUS) ||
9388             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9389                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9390
9391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9392                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9393                 tp->tx_mode &= ~val;
9394                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9395         }
9396
9397         tw32_f(MAC_TX_MODE, tp->tx_mode);
9398         udelay(100);
9399
9400         if (tg3_flag(tp, ENABLE_RSS)) {
9401                 tg3_rss_write_indir_tbl(tp);
9402
9403                 /* Set up the "secret" RSS hash key. */
9404                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9405                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9406                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9407                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9408                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9409                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9410                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9411                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9412                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9413                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9414         }
9415
9416         tp->rx_mode = RX_MODE_ENABLE;
9417         if (tg3_flag(tp, 5755_PLUS))
9418                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9419
9420         if (tg3_flag(tp, ENABLE_RSS))
9421                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9422                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9423                                RX_MODE_RSS_IPV6_HASH_EN |
9424                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9425                                RX_MODE_RSS_IPV4_HASH_EN |
9426                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9427
9428         tw32_f(MAC_RX_MODE, tp->rx_mode);
9429         udelay(10);
9430
9431         tw32(MAC_LED_CTRL, tp->led_ctrl);
9432
9433         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9434         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9435                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9436                 udelay(10);
9437         }
9438         tw32_f(MAC_RX_MODE, tp->rx_mode);
9439         udelay(10);
9440
9441         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9442                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9443                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9444                         /* Set the drive transmission level to 1.2V, but only
9445                          * if the signal pre-emphasis bit is not set. */
9446                         val = tr32(MAC_SERDES_CFG);
9447                         val &= 0xfffff000;
9448                         val |= 0x880;
9449                         tw32(MAC_SERDES_CFG, val);
9450                 }
9451                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9452                         tw32(MAC_SERDES_CFG, 0x616000);
9453         }
9454
9455         /* Prevent chip from dropping frames when flow control
9456          * is enabled.
9457          */
9458         if (tg3_flag(tp, 57765_CLASS))
9459                 val = 1;
9460         else
9461                 val = 2;
9462         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9463
9464         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9465             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9466                 /* Use hardware link auto-negotiation */
9467                 tg3_flag_set(tp, HW_AUTONEG);
9468         }
9469
9470         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9471             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9472                 u32 tmp;
9473
9474                 tmp = tr32(SERDES_RX_CTRL);
9475                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9476                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9477                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9478                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9479         }
9480
9481         if (!tg3_flag(tp, USE_PHYLIB)) {
9482                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9483                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9484
9485                 err = tg3_setup_phy(tp, 0);
9486                 if (err)
9487                         return err;
9488
9489                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9490                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9491                         u32 tmp;
9492
9493                         /* Clear CRC stats. */
9494                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9495                                 tg3_writephy(tp, MII_TG3_TEST1,
9496                                              tmp | MII_TG3_TEST1_CRC_EN);
9497                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9498                         }
9499                 }
9500         }
9501
9502         __tg3_set_rx_mode(tp->dev);
9503
9504         /* Initialize receive rules. */
9505         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9506         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9507         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9508         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9509
9510         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9511                 limit = 8;
9512         else
9513                 limit = 16;
9514         if (tg3_flag(tp, ENABLE_ASF))
9515                 limit -= 4;
9516         switch (limit) {
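        /* Deliberate fallthrough: entering at "case limit" zeroes
         * every rule from limit - 1 down to 4.  Rules 0 and 1 were
         * programmed above; rules 2 and 3 are left untouched.
         */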
9517         case 16:
9518                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9519         case 15:
9520                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9521         case 14:
9522                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9523         case 13:
9524                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9525         case 12:
9526                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9527         case 11:
9528                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9529         case 10:
9530                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9531         case 9:
9532                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9533         case 8:
9534                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9535         case 7:
9536                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9537         case 6:
9538                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9539         case 5:
9540                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9541         case 4:
9542                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9543         case 3:
9544                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9545         case 2:
9546         case 1:
9548         default:
9549                 break;
9550         }
9551
9552         if (tg3_flag(tp, ENABLE_APE))
9553                 /* Write our heartbeat update interval to the APE (disabled here). */
9554                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9555                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9556
9557         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9558
9559         return 0;
9560 }
9561
9562 /* Called at device open time to get the chip ready for
9563  * packet processing.  Invoked with tp->lock held.
9564  */
9565 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9566 {
9567         tg3_switch_clocks(tp);
9568
9569         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9570
9571         return tg3_reset_hw(tp, reset_phy);
9572 }
9573
9574 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9575 {
9576         int i;
9577
9578         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9579                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9580
9581                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9582                 off += len;
9583
9584                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9585                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9586                         memset(ocir, 0, TG3_OCIR_LEN);
9587         }
9588 }
9589
9590 /* sysfs attributes for hwmon */
9591 static ssize_t tg3_show_temp(struct device *dev,
9592                              struct device_attribute *devattr, char *buf)
9593 {
9594         struct pci_dev *pdev = to_pci_dev(dev);
9595         struct net_device *netdev = pci_get_drvdata(pdev);
9596         struct tg3 *tp = netdev_priv(netdev);
9597         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9598         u32 temperature;
9599
9600         spin_lock_bh(&tp->lock);
9601         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9602                                 sizeof(temperature));
9603         spin_unlock_bh(&tp->lock);
        /* The APE reports temperature in degrees Celsius; the hwmon
         * sysfs ABI expects millidegrees, so scale by 1000.
         */
9604         return sprintf(buf, "%u\n", temperature * 1000);
9605 }
9606
9608 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9609                           TG3_TEMP_SENSOR_OFFSET);
9610 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9611                           TG3_TEMP_CAUTION_OFFSET);
9612 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9613                           TG3_TEMP_MAX_OFFSET);
9614
9615 static struct attribute *tg3_attributes[] = {
9616         &sensor_dev_attr_temp1_input.dev_attr.attr,
9617         &sensor_dev_attr_temp1_crit.dev_attr.attr,
9618         &sensor_dev_attr_temp1_max.dev_attr.attr,
9619         NULL
9620 };
9621
9622 static const struct attribute_group tg3_group = {
9623         .attrs = tg3_attributes,
9624 };
9625
9626 static void tg3_hwmon_close(struct tg3 *tp)
9627 {
9628         if (tp->hwmon_dev) {
9629                 hwmon_device_unregister(tp->hwmon_dev);
9630                 tp->hwmon_dev = NULL;
9631                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9632         }
9633 }
9634
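/* Export the APE temperature sensors through hwmon, provided the
 * scratchpad records advertise any sensor data.
 */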
9635 static void tg3_hwmon_open(struct tg3 *tp)
9636 {
9637         int i, err;
9638         u32 size = 0;
9639         struct pci_dev *pdev = tp->pdev;
9640         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9641
9642         tg3_sd_scan_scratchpad(tp, ocirs);
9643
9644         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9645                 if (!ocirs[i].src_data_length)
9646                         continue;
9647
9648                 size += ocirs[i].src_hdr_length;
9649                 size += ocirs[i].src_data_length;
9650         }
9651
9652         if (!size)
9653                 return;
9654
9655         /* Register hwmon sysfs hooks */
9656         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9657         if (err) {
9658                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9659                 return;
9660         }
9661
9662         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9663         if (IS_ERR(tp->hwmon_dev)) {
9664                 tp->hwmon_dev = NULL;
9665                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9666                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9667         }
9668 }
9669
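/* Fold a 32-bit rolling hardware counter into a 64-bit host counter.
 * If the low word ends up smaller than the value just added, it
 * wrapped, so carry one into the high word.
 */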
9671 #define TG3_STAT_ADD32(PSTAT, REG) \
9672 do {    u32 __val = tr32(REG); \
9673         (PSTAT)->low += __val; \
9674         if ((PSTAT)->low < __val) \
9675                 (PSTAT)->high += 1; \
9676 } while (0)
9677
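/* Called once a second from tg3_timer() to fold the chip's 32-bit
 * MAC statistics counters into the 64-bit tg3_hw_stats block before
 * they can wrap.
 */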
9678 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9679 {
9680         struct tg3_hw_stats *sp = tp->hw_stats;
9681
9682         if (!netif_carrier_ok(tp->dev))
9683                 return;
9684
9685         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9686         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9687         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9688         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9689         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9690         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9691         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9692         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9693         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9694         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9695         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9696         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9697         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9698         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
9699                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9700                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9701                 u32 val;
9702
9703                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9704                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
9705                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9706                 tg3_flag_clear(tp, 5719_RDMA_BUG);
9707         }
9708
9709         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9710         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9711         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9712         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9713         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9714         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9715         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9716         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9717         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9718         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9719         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9720         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9721         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9722         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9723
9724         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9725         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9726             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9727             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9728                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9729         } else {
9730                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9731                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9732                 if (val) {
9733                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9734                         sp->rx_discards.low += val;
9735                         if (sp->rx_discards.low < val)
9736                                 sp->rx_discards.high += 1;
9737                 }
9738                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9739         }
9740         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9741 }
9742
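/* Watchdog for lost MSIs: if a vector has work pending but its
 * consumer indices have not moved since the last tick, assume the
 * interrupt was missed and invoke the handler directly, after one
 * tick of grace.
 */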
9743 static void tg3_chk_missed_msi(struct tg3 *tp)
9744 {
9745         u32 i;
9746
9747         for (i = 0; i < tp->irq_cnt; i++) {
9748                 struct tg3_napi *tnapi = &tp->napi[i];
9749
9750                 if (tg3_has_work(tnapi)) {
9751                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9752                             tnapi->last_tx_cons == tnapi->tx_cons) {
9753                                 if (tnapi->chk_msi_cnt < 1) {
9754                                         tnapi->chk_msi_cnt++;
9755                                         return;
9756                                 }
9757                                 tg3_msi(0, tnapi);
9758                         }
9759                 }
9760                 tnapi->chk_msi_cnt = 0;
9761                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9762                 tnapi->last_tx_cons = tnapi->tx_cons;
9763         }
9764 }
9765
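     /* Periodic driver timer, armed by tg3_timer_start().  Besides the
      * missed-MSI check above, it nudges the interrupt/status-block
      * handshake on chips without tagged status, fetches statistics and
      * polls the PHY roughly once per second, and sends the ASF
      * heartbeat (nominally every two seconds, see below).
      */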
9766 static void tg3_timer(unsigned long __opaque)
9767 {
9768         struct tg3 *tp = (struct tg3 *) __opaque;
9769
9770         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9771                 goto restart_timer;
9772
9773         spin_lock(&tp->lock);
9774
9775         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9776             tg3_flag(tp, 57765_CLASS))
9777                 tg3_chk_missed_msi(tp);
9778
9779         if (!tg3_flag(tp, TAGGED_STATUS)) {
9780                 /* All of this garbage is because, when using non-tagged
9781                  * IRQ status, the mailbox/status_block protocol the chip
9782                  * uses with the CPU is race-prone.
9783                  */
9784                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9785                         tw32(GRC_LOCAL_CTRL,
9786                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9787                 } else {
9788                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9789                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9790                 }
9791
9792                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9793                         spin_unlock(&tp->lock);
9794                         tg3_reset_task_schedule(tp);
9795                         goto restart_timer;
9796                 }
9797         }
9798
9799         /* This part only runs once per second. */
9800         if (!--tp->timer_counter) {
9801                 if (tg3_flag(tp, 5705_PLUS))
9802                         tg3_periodic_fetch_stats(tp);
9803
9804                 if (tp->setlpicnt && !--tp->setlpicnt)
9805                         tg3_phy_eee_enable(tp);
9806
9807                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9808                         u32 mac_stat;
9809                         int phy_event;
9810
9811                         mac_stat = tr32(MAC_STATUS);
9812
9813                         phy_event = 0;
9814                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9815                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9816                                         phy_event = 1;
9817                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9818                                 phy_event = 1;
9819
9820                         if (phy_event)
9821                                 tg3_setup_phy(tp, 0);
9822                 } else if (tg3_flag(tp, POLL_SERDES)) {
9823                         u32 mac_stat = tr32(MAC_STATUS);
9824                         int need_setup = 0;
9825
9826                         if (netif_carrier_ok(tp->dev) &&
9827                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9828                                 need_setup = 1;
9829                         }
9830                         if (!netif_carrier_ok(tp->dev) &&
9831                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9832                                          MAC_STATUS_SIGNAL_DET))) {
9833                                 need_setup = 1;
9834                         }
9835                         if (need_setup) {
9836                                 if (!tp->serdes_counter) {
9837                                         tw32_f(MAC_MODE,
9838                                              (tp->mac_mode &
9839                                               ~MAC_MODE_PORT_MODE_MASK));
9840                                         udelay(40);
9841                                         tw32_f(MAC_MODE, tp->mac_mode);
9842                                         udelay(40);
9843                                 }
9844                                 tg3_setup_phy(tp, 0);
9845                         }
9846                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9847                            tg3_flag(tp, 5780_CLASS)) {
9848                         tg3_serdes_parallel_detect(tp);
9849                 }
9850
9851                 tp->timer_counter = tp->timer_multiplier;
9852         }
9853
9854         /* Heartbeat is only sent once every 2 seconds.
9855          *
9856          * The heartbeat is to tell the ASF firmware that the host
9857          * driver is still alive.  In the event that the OS crashes,
9858          * ASF needs to reset the hardware to free up the FIFO space
9859          * that may be filled with rx packets destined for the host.
9860          * If the FIFO is full, ASF will no longer function properly.
9861          *
9862          * Unintended resets have been reported on real-time kernels
9863          * where the timer doesn't run on time.  Netpoll will have the
9864          * same problem.
9865          *
9866          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9867          * to check the ring condition when the heartbeat is expiring
9868          * before doing the reset.  This will prevent most unintended
9869          * resets.
9870          */
9871         if (!--tp->asf_counter) {
9872                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9873                         tg3_wait_for_event_ack(tp);
9874
9875                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9876                                       FWCMD_NICDRV_ALIVE3);
9877                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9878                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9879                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9880
9881                         tg3_generate_fw_event(tp);
9882                 }
9883                 tp->asf_counter = tp->asf_multiplier;
9884         }
9885
9886         spin_unlock(&tp->lock);
9887
9888 restart_timer:
9889         tp->timer.expires = jiffies + tp->timer_offset;
9890         add_timer(&tp->timer);
9891 }
9892
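     /* Pick the timer tick: one second on chips with working tagged
      * status, 100 ms otherwise (including 5717/57765, which need the
      * missed-MSI check to run frequently).  timer_multiplier converts
      * ticks back to seconds; asf_multiplier is the heartbeat interval
      * expressed in ticks.
      */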
9893 static void __devinit tg3_timer_init(struct tg3 *tp)
9894 {
9895         if (tg3_flag(tp, TAGGED_STATUS) &&
9896             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9897             !tg3_flag(tp, 57765_CLASS))
9898                 tp->timer_offset = HZ;
9899         else
9900                 tp->timer_offset = HZ / 10;
9901
9902         BUG_ON(tp->timer_offset > HZ);
9903
9904         tp->timer_multiplier = (HZ / tp->timer_offset);
9905         tp->asf_multiplier = (HZ / tp->timer_offset) *
9906                              TG3_FW_UPDATE_FREQ_SEC;
9907
9908         init_timer(&tp->timer);
9909         tp->timer.data = (unsigned long) tp;
9910         tp->timer.function = tg3_timer;
9911 }
9912
9913 static void tg3_timer_start(struct tg3 *tp)
9914 {
9915         tp->asf_counter   = tp->asf_multiplier;
9916         tp->timer_counter = tp->timer_multiplier;
9917
9918         tp->timer.expires = jiffies + tp->timer_offset;
9919         add_timer(&tp->timer);
9920 }
9921
9922 static void tg3_timer_stop(struct tg3 *tp)
9923 {
9924         del_timer_sync(&tp->timer);
9925 }
9926
9927 /* Restart hardware after configuration changes, self-test, etc.
9928  * Invoked with tp->lock held.
9929  */
9930 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9931         __releases(tp->lock)
9932         __acquires(tp->lock)
9933 {
9934         int err;
9935
9936         err = tg3_init_hw(tp, reset_phy);
9937         if (err) {
9938                 netdev_err(tp->dev,
9939                            "Failed to re-initialize device, aborting\n");
9940                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9941                 tg3_full_unlock(tp);
9942                 tg3_timer_stop(tp);
9943                 tp->irq_sync = 0;
9944                 tg3_napi_enable(tp);
9945                 dev_close(tp->dev);
9946                 tg3_full_lock(tp, 0);
9947         }
9948         return err;
9949 }
9950
9951 static void tg3_reset_task(struct work_struct *work)
9952 {
9953         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9954         int err;
9955
9956         tg3_full_lock(tp, 0);
9957
9958         if (!netif_running(tp->dev)) {
9959                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9960                 tg3_full_unlock(tp);
9961                 return;
9962         }
9963
9964         tg3_full_unlock(tp);
9965
9966         tg3_phy_stop(tp);
9967
9968         tg3_netif_stop(tp);
9969
9970         tg3_full_lock(tp, 1);
9971
9972         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9973                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9974                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9975                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9976                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9977         }
9978
9979         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9980         err = tg3_init_hw(tp, 1);
9981         if (err)
9982                 goto out;
9983
9984         tg3_netif_start(tp);
9985
9986 out:
9987         tg3_full_unlock(tp);
9988
9989         if (!err)
9990                 tg3_phy_start(tp);
9991
9992         tg3_flag_clear(tp, RESET_TASK_PENDING);
9993 }
9994
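     /* Request the IRQ for one NAPI context.  With multiple vectors the
      * name gets a "-<n>" suffix so the lines are distinguishable in
      * /proc/interrupts.  The handler and flags depend on the interrupt
      * mode: MSI/MSI-X handlers, or a (possibly shared) INTx handler
      * with or without tagged status.
      */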
9995 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9996 {
9997         irq_handler_t fn;
9998         unsigned long flags;
9999         char *name;
10000         struct tg3_napi *tnapi = &tp->napi[irq_num];
10001
10002         if (tp->irq_cnt == 1)
10003                 name = tp->dev->name;
10004         else {
10005                 name = &tnapi->irq_lbl[0];
10006                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10007                 name[IFNAMSIZ-1] = 0;
10008         }
10009
10010         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10011                 fn = tg3_msi;
10012                 if (tg3_flag(tp, 1SHOT_MSI))
10013                         fn = tg3_msi_1shot;
10014                 flags = 0;
10015         } else {
10016                 fn = tg3_interrupt;
10017                 if (tg3_flag(tp, TAGGED_STATUS))
10018                         fn = tg3_interrupt_tagged;
10019                 flags = IRQF_SHARED;
10020         }
10021
10022         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10023 }
10024
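      /* Self-test that the device can raise an interrupt: install a
       * minimal test ISR, force the coalescing engine to fire
       * immediately, and poll the interrupt mailbox for up to ~50 ms.
       * One-shot MSI is disabled first, since otherwise a delivered
       * interrupt would leave no observable state behind.
       */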
10025 static int tg3_test_interrupt(struct tg3 *tp)
10026 {
10027         struct tg3_napi *tnapi = &tp->napi[0];
10028         struct net_device *dev = tp->dev;
10029         int err, i, intr_ok = 0;
10030         u32 val;
10031
10032         if (!netif_running(dev))
10033                 return -ENODEV;
10034
10035         tg3_disable_ints(tp);
10036
10037         free_irq(tnapi->irq_vec, tnapi);
10038
10039         /*
10040          * Turn off MSI one-shot mode.  Otherwise this test has no
10041          * way to observe whether the interrupt was delivered.
10042          */
10043         if (tg3_flag(tp, 57765_PLUS)) {
10044                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10045                 tw32(MSGINT_MODE, val);
10046         }
10047
10048         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10049                           IRQF_SHARED, dev->name, tnapi);
10050         if (err)
10051                 return err;
10052
10053         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10054         tg3_enable_ints(tp);
10055
10056         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10057                tnapi->coal_now);
10058
10059         for (i = 0; i < 5; i++) {
10060                 u32 int_mbox, misc_host_ctrl;
10061
10062                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10063                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10064
10065                 if ((int_mbox != 0) ||
10066                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10067                         intr_ok = 1;
10068                         break;
10069                 }
10070
10071                 if (tg3_flag(tp, 57765_PLUS) &&
10072                     tnapi->hw_status->status_tag != tnapi->last_tag)
10073                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10074
10075                 msleep(10);
10076         }
10077
10078         tg3_disable_ints(tp);
10079
10080         free_irq(tnapi->irq_vec, tnapi);
10081
10082         err = tg3_request_irq(tp, 0);
10083
10084         if (err)
10085                 return err;
10086
10087         if (intr_ok) {
10088                 /* Re-enable MSI one-shot mode. */
10089                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10090                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10091                         tw32(MSGINT_MODE, val);
10092                 }
10093                 return 0;
10094         }
10095
10096         return -EIO;
10097 }
10098
10099 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
10100  * INTx mode is successfully restored.
10101  */
10102 static int tg3_test_msi(struct tg3 *tp)
10103 {
10104         int err;
10105         u16 pci_cmd;
10106
10107         if (!tg3_flag(tp, USING_MSI))
10108                 return 0;
10109
10110         /* Turn off SERR reporting in case MSI terminates with Master
10111          * Abort.
10112          */
10113         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10114         pci_write_config_word(tp->pdev, PCI_COMMAND,
10115                               pci_cmd & ~PCI_COMMAND_SERR);
10116
10117         err = tg3_test_interrupt(tp);
10118
10119         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10120
10121         if (!err)
10122                 return 0;
10123
10124         /* other failures */
10125         if (err != -EIO)
10126                 return err;
10127
10128         /* MSI test failed, go back to INTx mode */
10129         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10130                     "to INTx mode. Please report this failure to the PCI "
10131                     "maintainer and include system chipset information\n");
10132
10133         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10134
10135         pci_disable_msi(tp->pdev);
10136
10137         tg3_flag_clear(tp, USING_MSI);
10138         tp->napi[0].irq_vec = tp->pdev->irq;
10139
10140         err = tg3_request_irq(tp, 0);
10141         if (err)
10142                 return err;
10143
10144         /* Need to reset the chip because the MSI cycle may have terminated
10145          * with Master Abort.
10146          */
10147         tg3_full_lock(tp, 1);
10148
10149         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10150         err = tg3_init_hw(tp, 1);
10151
10152         tg3_full_unlock(tp);
10153
10154         if (err)
10155                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10156
10157         return err;
10158 }
10159
10160 static int tg3_request_firmware(struct tg3 *tp)
10161 {
10162         const __be32 *fw_data;
10163
10164         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10165                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10166                            tp->fw_needed);
10167                 return -ENOENT;
10168         }
10169
10170         fw_data = (void *)tp->fw->data;
10171
10172         /* Firmware blob starts with version numbers, followed by
10173          * start address and _full_ length including BSS sections
10174          * (which must be longer than the actual data, of course).
10175          */
10176
10177         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10178         if (tp->fw_len < (tp->fw->size - 12)) {
10179                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10180                            tp->fw_len, tp->fw_needed);
10181                 release_firmware(tp->fw);
10182                 tp->fw = NULL;
10183                 return -EINVAL;
10184         }
10185
10186         /* We no longer need firmware; we have it. */
10187         tp->fw_needed = NULL;
10188         return 0;
10189 }
10190
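      /* Number of interrupt vectors to request: one per rx/tx ring
       * pair, plus one extra vector for link and other miscellaneous
       * events when running in multiqueue MSI-X mode.
       */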
10191 static u32 tg3_irq_count(struct tg3 *tp)
10192 {
10193         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10194
10195         if (irq_cnt > 1) {
10196                 /* We want as many rx rings enabled as there are CPUs.
10197                  * In multiqueue MSI-X mode, the first MSI-X vector
10198                  * only deals with link interrupts, etc., so we add
10199                  * one to the number of vectors we are requesting.
10200                  */
10201                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10202         }
10203
10204         return irq_cnt;
10205 }
10206
10207 static bool tg3_enable_msix(struct tg3 *tp)
10208 {
10209         int i, rc;
10210         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10211
10212         tp->txq_cnt = tp->txq_req;
10213         tp->rxq_cnt = tp->rxq_req;
10214         if (!tp->rxq_cnt)
10215                 tp->rxq_cnt = netif_get_num_default_rss_queues();
10216         if (tp->rxq_cnt > tp->rxq_max)
10217                 tp->rxq_cnt = tp->rxq_max;
10218
10219         /* Disable multiple TX rings by default.  Simple round-robin hardware
10220          * scheduling of the TX rings can cause starvation of rings with
10221          * small packets when other rings have TSO or jumbo packets.
10222          */
10223         if (!tp->txq_req)
10224                 tp->txq_cnt = 1;
10225
10226         tp->irq_cnt = tg3_irq_count(tp);
10227
10228         for (i = 0; i < tp->irq_max; i++) {
10229                 msix_ent[i].entry  = i;
10230                 msix_ent[i].vector = 0;
10231         }
10232
10233         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10234         if (rc < 0) {
10235                 return false;
10236         } else if (rc != 0) {
10237                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10238                         return false;
10239                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10240                               tp->irq_cnt, rc);
10241                 tp->irq_cnt = rc;
10242                 tp->rxq_cnt = max(rc - 1, 1);
10243                 if (tp->txq_cnt)
10244                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10245         }
10246
10247         for (i = 0; i < tp->irq_max; i++)
10248                 tp->napi[i].irq_vec = msix_ent[i].vector;
10249
10250         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10251                 pci_disable_msix(tp->pdev);
10252                 return false;
10253         }
10254
10255         if (tp->irq_cnt == 1)
10256                 return true;
10257
10258         tg3_flag_set(tp, ENABLE_RSS);
10259
10260         if (tp->txq_cnt > 1)
10261                 tg3_flag_set(tp, ENABLE_TSS);
10262
10263         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10264
10265         return true;
10266 }
10267
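      /* Pick the interrupt mode, in order of preference MSI-X > MSI >
       * INTx.  MSI and MSI-X require tagged status; a chip claiming MSI
       * support without it falls back to INTx with a warning.
       * MSGINT_MODE is then programmed to match (multi-vector enable,
       * optional one-shot disable).
       */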
10268 static void tg3_ints_init(struct tg3 *tp)
10269 {
10270         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10271             !tg3_flag(tp, TAGGED_STATUS)) {
10272                 /* All MSI-supporting chips should support tagged
10273                  * status.  Assert that this is the case.
10274                  */
10275                 netdev_warn(tp->dev,
10276                             "MSI without TAGGED_STATUS? Not using MSI\n");
10277                 goto defcfg;
10278         }
10279
10280         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10281                 tg3_flag_set(tp, USING_MSIX);
10282         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10283                 tg3_flag_set(tp, USING_MSI);
10284
10285         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10286                 u32 msi_mode = tr32(MSGINT_MODE);
10287                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10288                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10289                 if (!tg3_flag(tp, 1SHOT_MSI))
10290                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10291                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10292         }
10293 defcfg:
10294         if (!tg3_flag(tp, USING_MSIX)) {
10295                 tp->irq_cnt = 1;
10296                 tp->napi[0].irq_vec = tp->pdev->irq;
10297         }
10298
10299         if (tp->irq_cnt == 1) {
10300                 tp->txq_cnt = 1;
10301                 tp->rxq_cnt = 1;
10302                 netif_set_real_num_tx_queues(tp->dev, 1);
10303                 netif_set_real_num_rx_queues(tp->dev, 1);
10304         }
10305 }
10306
10307 static void tg3_ints_fini(struct tg3 *tp)
10308 {
10309         if (tg3_flag(tp, USING_MSIX))
10310                 pci_disable_msix(tp->pdev);
10311         else if (tg3_flag(tp, USING_MSI))
10312                 pci_disable_msi(tp->pdev);
10313         tg3_flag_clear(tp, USING_MSI);
10314         tg3_flag_clear(tp, USING_MSIX);
10315         tg3_flag_clear(tp, ENABLE_RSS);
10316         tg3_flag_clear(tp, ENABLE_TSS);
10317 }
10318
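      /* Bring the device up: configure interrupts first (this decides
       * how many NAPI contexts and rings to allocate), then allocate
       * coherent DMA memory, request the IRQs, initialize the hardware,
       * optionally run the MSI self-test, and finally start the PHY,
       * hwmon, and the periodic timer.  Errors unwind in reverse order
       * through the err_out labels.
       */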
10319 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
10320 {
10321         struct net_device *dev = tp->dev;
10322         int i, err;
10323
10324         /*
10325          * Setup interrupts first so we know how
10326          * many NAPI resources to allocate
10327          */
10328         tg3_ints_init(tp);
10329
10330         tg3_rss_check_indir_tbl(tp);
10331
10332         /* The placement of this call is tied
10333          * to the setup and use of Host TX descriptors.
10334          */
10335         err = tg3_alloc_consistent(tp);
10336         if (err)
10337                 goto err_out1;
10338
10339         tg3_napi_init(tp);
10340
10341         tg3_napi_enable(tp);
10342
10343         for (i = 0; i < tp->irq_cnt; i++) {
10344                 struct tg3_napi *tnapi = &tp->napi[i];
10345                 err = tg3_request_irq(tp, i);
10346                 if (err) {
10347                         for (i--; i >= 0; i--) {
10348                                 tnapi = &tp->napi[i];
10349                                 free_irq(tnapi->irq_vec, tnapi);
10350                         }
10351                         goto err_out2;
10352                 }
10353         }
10354
10355         tg3_full_lock(tp, 0);
10356
10357         err = tg3_init_hw(tp, reset_phy);
10358         if (err) {
10359                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10360                 tg3_free_rings(tp);
10361         }
10362
10363         tg3_full_unlock(tp);
10364
10365         if (err)
10366                 goto err_out3;
10367
10368         if (test_irq && tg3_flag(tp, USING_MSI)) {
10369                 err = tg3_test_msi(tp);
10370
10371                 if (err) {
10372                         tg3_full_lock(tp, 0);
10373                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10374                         tg3_free_rings(tp);
10375                         tg3_full_unlock(tp);
10376
10377                         goto err_out2;
10378                 }
10379
10380                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10381                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10382
10383                         tw32(PCIE_TRANSACTION_CFG,
10384                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10385                 }
10386         }
10387
10388         tg3_phy_start(tp);
10389
10390         tg3_hwmon_open(tp);
10391
10392         tg3_full_lock(tp, 0);
10393
10394         tg3_timer_start(tp);
10395         tg3_flag_set(tp, INIT_COMPLETE);
10396         tg3_enable_ints(tp);
10397
10398         tg3_full_unlock(tp);
10399
10400         netif_tx_start_all_queues(dev);
10401
10402         /*
10403          * Reset the loopback feature if it was turned on while the device
10404          * was down; make sure that it is configured properly now.
10405          */
10406         if (dev->features & NETIF_F_LOOPBACK)
10407                 tg3_set_loopback(dev, dev->features);
10408
10409         return 0;
10410
10411 err_out3:
10412         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10413                 struct tg3_napi *tnapi = &tp->napi[i];
10414                 free_irq(tnapi->irq_vec, tnapi);
10415         }
10416
10417 err_out2:
10418         tg3_napi_disable(tp);
10419         tg3_napi_fini(tp);
10420         tg3_free_consistent(tp);
10421
10422 err_out1:
10423         tg3_ints_fini(tp);
10424
10425         return err;
10426 }
10427
10428 static void tg3_stop(struct tg3 *tp)
10429 {
10430         int i;
10431
10432         tg3_napi_disable(tp);
10433         tg3_reset_task_cancel(tp);
10434
10435         netif_tx_disable(tp->dev);
10436
10437         tg3_timer_stop(tp);
10438
10439         tg3_hwmon_close(tp);
10440
10441         tg3_phy_stop(tp);
10442
10443         tg3_full_lock(tp, 1);
10444
10445         tg3_disable_ints(tp);
10446
10447         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10448         tg3_free_rings(tp);
10449         tg3_flag_clear(tp, INIT_COMPLETE);
10450
10451         tg3_full_unlock(tp);
10452
10453         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10454                 struct tg3_napi *tnapi = &tp->napi[i];
10455                 free_irq(tnapi->irq_vec, tnapi);
10456         }
10457
10458         tg3_ints_fini(tp);
10459
10460         tg3_napi_fini(tp);
10461
10462         tg3_free_consistent(tp);
10463 }
10464
10465 static int tg3_open(struct net_device *dev)
10466 {
10467         struct tg3 *tp = netdev_priv(dev);
10468         int err;
10469
10470         if (tp->fw_needed) {
10471                 err = tg3_request_firmware(tp);
10472                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10473                         if (err)
10474                                 return err;
10475                 } else if (err) {
10476                         netdev_warn(tp->dev, "TSO capability disabled\n");
10477                         tg3_flag_clear(tp, TSO_CAPABLE);
10478                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10479                         netdev_notice(tp->dev, "TSO capability restored\n");
10480                         tg3_flag_set(tp, TSO_CAPABLE);
10481                 }
10482         }
10483
10484         netif_carrier_off(tp->dev);
10485
10486         err = tg3_power_up(tp);
10487         if (err)
10488                 return err;
10489
10490         tg3_full_lock(tp, 0);
10491
10492         tg3_disable_ints(tp);
10493         tg3_flag_clear(tp, INIT_COMPLETE);
10494
10495         tg3_full_unlock(tp);
10496
10497         err = tg3_start(tp, true, true);
10498         if (err) {
10499                 tg3_frob_aux_power(tp, false);
10500                 pci_set_power_state(tp->pdev, PCI_D3hot);
10501         }
10502         return err;
10503 }
10504
10505 static int tg3_close(struct net_device *dev)
10506 {
10507         struct tg3 *tp = netdev_priv(dev);
10508
10509         tg3_stop(tp);
10510
10511         /* Clear stats across close / open calls */
10512         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10513         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10514
10515         tg3_power_down(tp);
10516
10517         netif_carrier_off(tp->dev);
10518
10519         return 0;
10520 }
10521
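      /* Hardware statistics are kept as {high, low} 32-bit halves; the
       * low half is updated from 32-bit hardware counters (see
       * TG3_STAT_ADD32) with a manual carry into the high half.  Fold
       * one such counter into a u64.
       */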
10522 static inline u64 get_stat64(tg3_stat64_t *val)
10523 {
10524         return ((u64)val->high << 32) | ((u64)val->low);
10525 }
10526
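      /* On 5700/5701 with a copper PHY, CRC errors are taken from the
       * PHY's own counter: enable it via MII_TG3_TEST1 and accumulate
       * the reading in phy_crc_errors (the register apparently clears
       * on read).  All other configurations use the MAC rx_fcs_errors
       * counter directly.
       */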
10527 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10528 {
10529         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10530
10531         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10532             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10533              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10534                 u32 val;
10535
10536                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10537                         tg3_writephy(tp, MII_TG3_TEST1,
10538                                      val | MII_TG3_TEST1_CRC_EN);
10539                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10540                 } else
10541                         val = 0;
10542
10543                 tp->phy_crc_errors += val;
10544
10545                 return tp->phy_crc_errors;
10546         }
10547
10548         return get_stat64(&hw_stats->rx_fcs_errors);
10549 }
10550
10551 #define ESTAT_ADD(member) \
10552         estats->member =        old_estats->member + \
10553                                 get_stat64(&hw_stats->member)
10554
10555 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10556 {
10557         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10558         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10559
10560         ESTAT_ADD(rx_octets);
10561         ESTAT_ADD(rx_fragments);
10562         ESTAT_ADD(rx_ucast_packets);
10563         ESTAT_ADD(rx_mcast_packets);
10564         ESTAT_ADD(rx_bcast_packets);
10565         ESTAT_ADD(rx_fcs_errors);
10566         ESTAT_ADD(rx_align_errors);
10567         ESTAT_ADD(rx_xon_pause_rcvd);
10568         ESTAT_ADD(rx_xoff_pause_rcvd);
10569         ESTAT_ADD(rx_mac_ctrl_rcvd);
10570         ESTAT_ADD(rx_xoff_entered);
10571         ESTAT_ADD(rx_frame_too_long_errors);
10572         ESTAT_ADD(rx_jabbers);
10573         ESTAT_ADD(rx_undersize_packets);
10574         ESTAT_ADD(rx_in_length_errors);
10575         ESTAT_ADD(rx_out_length_errors);
10576         ESTAT_ADD(rx_64_or_less_octet_packets);
10577         ESTAT_ADD(rx_65_to_127_octet_packets);
10578         ESTAT_ADD(rx_128_to_255_octet_packets);
10579         ESTAT_ADD(rx_256_to_511_octet_packets);
10580         ESTAT_ADD(rx_512_to_1023_octet_packets);
10581         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10582         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10583         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10584         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10585         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10586
10587         ESTAT_ADD(tx_octets);
10588         ESTAT_ADD(tx_collisions);
10589         ESTAT_ADD(tx_xon_sent);
10590         ESTAT_ADD(tx_xoff_sent);
10591         ESTAT_ADD(tx_flow_control);
10592         ESTAT_ADD(tx_mac_errors);
10593         ESTAT_ADD(tx_single_collisions);
10594         ESTAT_ADD(tx_mult_collisions);
10595         ESTAT_ADD(tx_deferred);
10596         ESTAT_ADD(tx_excessive_collisions);
10597         ESTAT_ADD(tx_late_collisions);
10598         ESTAT_ADD(tx_collide_2times);
10599         ESTAT_ADD(tx_collide_3times);
10600         ESTAT_ADD(tx_collide_4times);
10601         ESTAT_ADD(tx_collide_5times);
10602         ESTAT_ADD(tx_collide_6times);
10603         ESTAT_ADD(tx_collide_7times);
10604         ESTAT_ADD(tx_collide_8times);
10605         ESTAT_ADD(tx_collide_9times);
10606         ESTAT_ADD(tx_collide_10times);
10607         ESTAT_ADD(tx_collide_11times);
10608         ESTAT_ADD(tx_collide_12times);
10609         ESTAT_ADD(tx_collide_13times);
10610         ESTAT_ADD(tx_collide_14times);
10611         ESTAT_ADD(tx_collide_15times);
10612         ESTAT_ADD(tx_ucast_packets);
10613         ESTAT_ADD(tx_mcast_packets);
10614         ESTAT_ADD(tx_bcast_packets);
10615         ESTAT_ADD(tx_carrier_sense_errors);
10616         ESTAT_ADD(tx_discards);
10617         ESTAT_ADD(tx_errors);
10618
10619         ESTAT_ADD(dma_writeq_full);
10620         ESTAT_ADD(dma_write_prioq_full);
10621         ESTAT_ADD(rxbds_empty);
10622         ESTAT_ADD(rx_discards);
10623         ESTAT_ADD(rx_errors);
10624         ESTAT_ADD(rx_threshold_hit);
10625
10626         ESTAT_ADD(dma_readq_full);
10627         ESTAT_ADD(dma_read_prioq_full);
10628         ESTAT_ADD(tx_comp_queue_full);
10629
10630         ESTAT_ADD(ring_set_send_prod_index);
10631         ESTAT_ADD(ring_status_update);
10632         ESTAT_ADD(nic_irqs);
10633         ESTAT_ADD(nic_avoided_irqs);
10634         ESTAT_ADD(nic_tx_threshold_hit);
10635
10636         ESTAT_ADD(mbuf_lwm_thresh_hit);
10637 }
10638
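      /* Fill the standard rtnl_link_stats64 from the hardware
       * statistics block.  net_stats_prev carries totals saved across
       * chip resets; it is zeroed in tg3_close(), so the counters
       * restart across a close/open cycle.
       */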
10639 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10640 {
10641         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10642         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10643
10644         stats->rx_packets = old_stats->rx_packets +
10645                 get_stat64(&hw_stats->rx_ucast_packets) +
10646                 get_stat64(&hw_stats->rx_mcast_packets) +
10647                 get_stat64(&hw_stats->rx_bcast_packets);
10648
10649         stats->tx_packets = old_stats->tx_packets +
10650                 get_stat64(&hw_stats->tx_ucast_packets) +
10651                 get_stat64(&hw_stats->tx_mcast_packets) +
10652                 get_stat64(&hw_stats->tx_bcast_packets);
10653
10654         stats->rx_bytes = old_stats->rx_bytes +
10655                 get_stat64(&hw_stats->rx_octets);
10656         stats->tx_bytes = old_stats->tx_bytes +
10657                 get_stat64(&hw_stats->tx_octets);
10658
10659         stats->rx_errors = old_stats->rx_errors +
10660                 get_stat64(&hw_stats->rx_errors);
10661         stats->tx_errors = old_stats->tx_errors +
10662                 get_stat64(&hw_stats->tx_errors) +
10663                 get_stat64(&hw_stats->tx_mac_errors) +
10664                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10665                 get_stat64(&hw_stats->tx_discards);
10666
10667         stats->multicast = old_stats->multicast +
10668                 get_stat64(&hw_stats->rx_mcast_packets);
10669         stats->collisions = old_stats->collisions +
10670                 get_stat64(&hw_stats->tx_collisions);
10671
10672         stats->rx_length_errors = old_stats->rx_length_errors +
10673                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10674                 get_stat64(&hw_stats->rx_undersize_packets);
10675
10676         stats->rx_over_errors = old_stats->rx_over_errors +
10677                 get_stat64(&hw_stats->rxbds_empty);
10678         stats->rx_frame_errors = old_stats->rx_frame_errors +
10679                 get_stat64(&hw_stats->rx_align_errors);
10680         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10681                 get_stat64(&hw_stats->tx_discards);
10682         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10683                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10684
10685         stats->rx_crc_errors = old_stats->rx_crc_errors +
10686                 tg3_calc_crc_errors(tp);
10687
10688         stats->rx_missed_errors = old_stats->rx_missed_errors +
10689                 get_stat64(&hw_stats->rx_discards);
10690
10691         stats->rx_dropped = tp->rx_dropped;
10692         stats->tx_dropped = tp->tx_dropped;
10693 }
10694
10695 static int tg3_get_regs_len(struct net_device *dev)
10696 {
10697         return TG3_REG_BLK_SIZE;
10698 }
10699
10700 static void tg3_get_regs(struct net_device *dev,
10701                 struct ethtool_regs *regs, void *_p)
10702 {
10703         struct tg3 *tp = netdev_priv(dev);
10704
10705         regs->version = 0;
10706
10707         memset(_p, 0, TG3_REG_BLK_SIZE);
10708
10709         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10710                 return;
10711
10712         tg3_full_lock(tp, 0);
10713
10714         tg3_dump_legacy_regs(tp, (u32 *)_p);
10715
10716         tg3_full_unlock(tp);
10717 }
10718
10719 static int tg3_get_eeprom_len(struct net_device *dev)
10720 {
10721         struct tg3 *tp = netdev_priv(dev);
10722
10723         return tp->nvram_size;
10724 }
10725
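      /* ethtool EEPROM read.  NVRAM is accessed in big-endian 32-bit
       * words, so an unaligned offset or length is handled by reading
       * whole words and copying out only the requested bytes at the
       * head and tail.
       */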
10726 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10727 {
10728         struct tg3 *tp = netdev_priv(dev);
10729         int ret;
10730         u8  *pd;
10731         u32 i, offset, len, b_offset, b_count;
10732         __be32 val;
10733
10734         if (tg3_flag(tp, NO_NVRAM))
10735                 return -EINVAL;
10736
10737         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10738                 return -EAGAIN;
10739
10740         offset = eeprom->offset;
10741         len = eeprom->len;
10742         eeprom->len = 0;
10743
10744         eeprom->magic = TG3_EEPROM_MAGIC;
10745
10746         if (offset & 3) {
10747                 /* adjustments to start on required 4 byte boundary */
10748                 b_offset = offset & 3;
10749                 b_count = 4 - b_offset;
10750                 if (b_count > len) {
10751                         /* i.e. offset=1 len=2 */
10752                         b_count = len;
10753                 }
10754                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10755                 if (ret)
10756                         return ret;
10757                 memcpy(data, ((char *)&val) + b_offset, b_count);
10758                 len -= b_count;
10759                 offset += b_count;
10760                 eeprom->len += b_count;
10761         }
10762
10763         /* read bytes up to the last 4 byte boundary */
10764         pd = &data[eeprom->len];
10765         for (i = 0; i < (len - (len & 3)); i += 4) {
10766                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10767                 if (ret) {
10768                         eeprom->len += i;
10769                         return ret;
10770                 }
10771                 memcpy(pd + i, &val, 4);
10772         }
10773         eeprom->len += i;
10774
10775         if (len & 3) {
10776                 /* read last bytes not ending on 4 byte boundary */
10777                 pd = &data[eeprom->len];
10778                 b_count = len & 3;
10779                 b_offset = offset + len - b_count;
10780                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10781                 if (ret)
10782                         return ret;
10783                 memcpy(pd, &val, b_count);
10784                 eeprom->len += b_count;
10785         }
10786         return 0;
10787 }
10788
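      /* ethtool EEPROM write.  NVRAM writes must cover whole 32-bit
       * words, so an unaligned head or tail is handled read-modify-write
       * style: the bordering words are fetched and the user data merged
       * into a temporary buffer before programming the block.
       */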
10789 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10790 {
10791         struct tg3 *tp = netdev_priv(dev);
10792         int ret;
10793         u32 offset, len, b_offset, odd_len;
10794         u8 *buf;
10795         __be32 start, end;
10796
10797         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10798                 return -EAGAIN;
10799
10800         if (tg3_flag(tp, NO_NVRAM) ||
10801             eeprom->magic != TG3_EEPROM_MAGIC)
10802                 return -EINVAL;
10803
10804         offset = eeprom->offset;
10805         len = eeprom->len;
10806
10807         if ((b_offset = (offset & 3))) {
10808                 /* adjustments to start on required 4 byte boundary */
10809                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10810                 if (ret)
10811                         return ret;
10812                 len += b_offset;
10813                 offset &= ~3;
10814                 if (len < 4)
10815                         len = 4;
10816         }
10817
10818         odd_len = 0;
10819         if (len & 3) {
10820                 /* adjustments to end on required 4 byte boundary */
10821                 odd_len = 1;
10822                 len = (len + 3) & ~3;
10823                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10824                 if (ret)
10825                         return ret;
10826         }
10827
10828         buf = data;
10829         if (b_offset || odd_len) {
10830                 buf = kmalloc(len, GFP_KERNEL);
10831                 if (!buf)
10832                         return -ENOMEM;
10833                 if (b_offset)
10834                         memcpy(buf, &start, 4);
10835                 if (odd_len)
10836                         memcpy(buf+len-4, &end, 4);
10837                 memcpy(buf + b_offset, data, eeprom->len);
10838         }
10839
10840         ret = tg3_nvram_write_block(tp, offset, len, buf);
10841
10842         if (buf != data)
10843                 kfree(buf);
10844
10845         return ret;
10846 }
10847
10848 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10849 {
10850         struct tg3 *tp = netdev_priv(dev);
10851
10852         if (tg3_flag(tp, USE_PHYLIB)) {
10853                 struct phy_device *phydev;
10854                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10855                         return -EAGAIN;
10856                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10857                 return phy_ethtool_gset(phydev, cmd);
10858         }
10859
10860         cmd->supported = (SUPPORTED_Autoneg);
10861
10862         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10863                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10864                                    SUPPORTED_1000baseT_Full);
10865
10866         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10867                 cmd->supported |= (SUPPORTED_100baseT_Half |
10868                                    SUPPORTED_100baseT_Full |
10869                                    SUPPORTED_10baseT_Half |
10870                                    SUPPORTED_10baseT_Full |
10871                                    SUPPORTED_TP);
10872                 cmd->port = PORT_TP;
10873         } else {
10874                 cmd->supported |= SUPPORTED_FIBRE;
10875                 cmd->port = PORT_FIBRE;
10876         }
10877
10878         cmd->advertising = tp->link_config.advertising;
10879         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10880                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10881                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10882                                 cmd->advertising |= ADVERTISED_Pause;
10883                         } else {
10884                                 cmd->advertising |= ADVERTISED_Pause |
10885                                                     ADVERTISED_Asym_Pause;
10886                         }
10887                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10888                         cmd->advertising |= ADVERTISED_Asym_Pause;
10889                 }
10890         }
10891         if (netif_running(dev) && netif_carrier_ok(dev)) {
10892                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10893                 cmd->duplex = tp->link_config.active_duplex;
10894                 cmd->lp_advertising = tp->link_config.rmt_adv;
10895                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10896                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10897                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10898                         else
10899                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10900                 }
10901         } else {
10902                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10903                 cmd->duplex = DUPLEX_UNKNOWN;
10904                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10905         }
10906         cmd->phy_address = tp->phy_addr;
10907         cmd->transceiver = XCVR_INTERNAL;
10908         cmd->autoneg = tp->link_config.autoneg;
10909         cmd->maxtxpkt = 0;
10910         cmd->maxrxpkt = 0;
10911         return 0;
10912 }
10913
10914 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10915 {
10916         struct tg3 *tp = netdev_priv(dev);
10917         u32 speed = ethtool_cmd_speed(cmd);
10918
10919         if (tg3_flag(tp, USE_PHYLIB)) {
10920                 struct phy_device *phydev;
10921                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10922                         return -EAGAIN;
10923                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10924                 return phy_ethtool_sset(phydev, cmd);
10925         }
10926
10927         if (cmd->autoneg != AUTONEG_ENABLE &&
10928             cmd->autoneg != AUTONEG_DISABLE)
10929                 return -EINVAL;
10930
10931         if (cmd->autoneg == AUTONEG_DISABLE &&
10932             cmd->duplex != DUPLEX_FULL &&
10933             cmd->duplex != DUPLEX_HALF)
10934                 return -EINVAL;
10935
10936         if (cmd->autoneg == AUTONEG_ENABLE) {
10937                 u32 mask = ADVERTISED_Autoneg |
10938                            ADVERTISED_Pause |
10939                            ADVERTISED_Asym_Pause;
10940
10941                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10942                         mask |= ADVERTISED_1000baseT_Half |
10943                                 ADVERTISED_1000baseT_Full;
10944
10945                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10946                         mask |= ADVERTISED_100baseT_Half |
10947                                 ADVERTISED_100baseT_Full |
10948                                 ADVERTISED_10baseT_Half |
10949                                 ADVERTISED_10baseT_Full |
10950                                 ADVERTISED_TP;
10951                 else
10952                         mask |= ADVERTISED_FIBRE;
10953
10954                 if (cmd->advertising & ~mask)
10955                         return -EINVAL;
10956
10957                 mask &= (ADVERTISED_1000baseT_Half |
10958                          ADVERTISED_1000baseT_Full |
10959                          ADVERTISED_100baseT_Half |
10960                          ADVERTISED_100baseT_Full |
10961                          ADVERTISED_10baseT_Half |
10962                          ADVERTISED_10baseT_Full);
10963
10964                 cmd->advertising &= mask;
10965         } else {
10966                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10967                         if (speed != SPEED_1000)
10968                                 return -EINVAL;
10969
10970                         if (cmd->duplex != DUPLEX_FULL)
10971                                 return -EINVAL;
10972                 } else {
10973                         if (speed != SPEED_100 &&
10974                             speed != SPEED_10)
10975                                 return -EINVAL;
10976                 }
10977         }
10978
10979         tg3_full_lock(tp, 0);
10980
10981         tp->link_config.autoneg = cmd->autoneg;
10982         if (cmd->autoneg == AUTONEG_ENABLE) {
10983                 tp->link_config.advertising = (cmd->advertising |
10984                                               ADVERTISED_Autoneg);
10985                 tp->link_config.speed = SPEED_UNKNOWN;
10986                 tp->link_config.duplex = DUPLEX_UNKNOWN;
10987         } else {
10988                 tp->link_config.advertising = 0;
10989                 tp->link_config.speed = speed;
10990                 tp->link_config.duplex = cmd->duplex;
10991         }
10992
10993         if (netif_running(dev))
10994                 tg3_setup_phy(tp, 1);
10995
10996         tg3_full_unlock(tp);
10997
10998         return 0;
10999 }
11000
11001 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11002 {
11003         struct tg3 *tp = netdev_priv(dev);
11004
11005         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11006         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11007         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11008         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11009 }
11010
11011 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11012 {
11013         struct tg3 *tp = netdev_priv(dev);
11014
11015         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11016                 wol->supported = WAKE_MAGIC;
11017         else
11018                 wol->supported = 0;
11019         wol->wolopts = 0;
11020         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11021                 wol->wolopts = WAKE_MAGIC;
11022         memset(&wol->sopass, 0, sizeof(wol->sopass));
11023 }
11024
11025 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11026 {
11027         struct tg3 *tp = netdev_priv(dev);
11028         struct device *dp = &tp->pdev->dev;
11029
11030         if (wol->wolopts & ~WAKE_MAGIC)
11031                 return -EINVAL;
11032         if ((wol->wolopts & WAKE_MAGIC) &&
11033             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11034                 return -EINVAL;
11035
11036         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11037
11038         spin_lock_bh(&tp->lock);
11039         if (device_may_wakeup(dp))
11040                 tg3_flag_set(tp, WOL_ENABLE);
11041         else
11042                 tg3_flag_clear(tp, WOL_ENABLE);
11043         spin_unlock_bh(&tp->lock);
11044
11045         return 0;
11046 }
11047
11048 static u32 tg3_get_msglevel(struct net_device *dev)
11049 {
11050         struct tg3 *tp = netdev_priv(dev);
11051         return tp->msg_enable;
11052 }
11053
11054 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11055 {
11056         struct tg3 *tp = netdev_priv(dev);
11057         tp->msg_enable = value;
11058 }
11059
11060 static int tg3_nway_reset(struct net_device *dev)
11061 {
11062         struct tg3 *tp = netdev_priv(dev);
11063         int r;
11064
11065         if (!netif_running(dev))
11066                 return -EAGAIN;
11067
11068         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11069                 return -EINVAL;
11070
11071         if (tg3_flag(tp, USE_PHYLIB)) {
11072                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11073                         return -EAGAIN;
11074                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11075         } else {
11076                 u32 bmcr;
11077
11078                 spin_lock_bh(&tp->lock);
11079                 r = -EINVAL;
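                      /* Read BMCR twice; the first read presumably
                       * flushes latched bits so the value tested below
                       * is current.
                       */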
11080                 tg3_readphy(tp, MII_BMCR, &bmcr);
11081                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11082                     ((bmcr & BMCR_ANENABLE) ||
11083                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11084                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11085                                                    BMCR_ANENABLE);
11086                         r = 0;
11087                 }
11088                 spin_unlock_bh(&tp->lock);
11089         }
11090
11091         return r;
11092 }
11093
11094 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11095 {
11096         struct tg3 *tp = netdev_priv(dev);
11097
11098         ering->rx_max_pending = tp->rx_std_ring_mask;
11099         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11100                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11101         else
11102                 ering->rx_jumbo_max_pending = 0;
11103
11104         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11105
11106         ering->rx_pending = tp->rx_pending;
11107         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11108                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11109         else
11110                 ering->rx_jumbo_pending = 0;
11111
11112         ering->tx_pending = tp->napi[0].tx_pending;
11113 }
11114
11115 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11116 {
11117         struct tg3 *tp = netdev_priv(dev);
11118         int i, irq_sync = 0, err = 0;
11119
11120         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11121             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11122             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11123             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11124             (tg3_flag(tp, TSO_BUG) &&
11125              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11126                 return -EINVAL;
11127
11128         if (netif_running(dev)) {
11129                 tg3_phy_stop(tp);
11130                 tg3_netif_stop(tp);
11131                 irq_sync = 1;
11132         }
11133
11134         tg3_full_lock(tp, irq_sync);
11135
11136         tp->rx_pending = ering->rx_pending;
11137
11138         if (tg3_flag(tp, MAX_RXPEND_64) &&
11139             tp->rx_pending > 63)
11140                 tp->rx_pending = 63;
11141         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11142
11143         for (i = 0; i < tp->irq_max; i++)
11144                 tp->napi[i].tx_pending = ering->tx_pending;
11145
11146         if (netif_running(dev)) {
11147                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11148                 err = tg3_restart_hw(tp, 1);
11149                 if (!err)
11150                         tg3_netif_start(tp);
11151         }
11152
11153         tg3_full_unlock(tp);
11154
11155         if (irq_sync && !err)
11156                 tg3_phy_start(tp);
11157
11158         return err;
11159 }
11160
11161 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11162 {
11163         struct tg3 *tp = netdev_priv(dev);
11164
11165         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11166
11167         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11168                 epause->rx_pause = 1;
11169         else
11170                 epause->rx_pause = 0;
11171
11172         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11173                 epause->tx_pause = 1;
11174         else
11175                 epause->tx_pause = 0;
11176 }
11177
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        if (tg3_flag(tp, USE_PHYLIB)) {
                u32 newadv;
                struct phy_device *phydev;

                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

                if (!(phydev->supported & SUPPORTED_Pause) ||
                    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
                     (epause->rx_pause != epause->tx_pause)))
                        return -EINVAL;

                tp->link_config.flowctrl = 0;
                if (epause->rx_pause) {
                        tp->link_config.flowctrl |= FLOW_CTRL_RX;

                        if (epause->tx_pause) {
                                tp->link_config.flowctrl |= FLOW_CTRL_TX;
                                newadv = ADVERTISED_Pause;
                        } else
                                newadv = ADVERTISED_Pause |
                                         ADVERTISED_Asym_Pause;
                } else if (epause->tx_pause) {
                        tp->link_config.flowctrl |= FLOW_CTRL_TX;
                        newadv = ADVERTISED_Asym_Pause;
                } else
                        newadv = 0;

                if (epause->autoneg)
                        tg3_flag_set(tp, PAUSE_AUTONEG);
                else
                        tg3_flag_clear(tp, PAUSE_AUTONEG);

                if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                        u32 oldadv = phydev->advertising &
                                     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
                        if (oldadv != newadv) {
                                phydev->advertising &=
                                        ~(ADVERTISED_Pause |
                                          ADVERTISED_Asym_Pause);
                                phydev->advertising |= newadv;
                                if (phydev->autoneg) {
                                        /*
                                         * Always renegotiate the link to
                                         * inform our link partner of our
                                         * flow control settings, even if the
                                         * flow control is forced.  Let
                                         * tg3_adjust_link() do the final
                                         * flow control setup.
                                         */
                                        return phy_start_aneg(phydev);
                                }
                        }

                        if (!epause->autoneg)
                                tg3_setup_flow_control(tp, 0, 0);
                } else {
                        tp->link_config.advertising &=
                                        ~(ADVERTISED_Pause |
                                          ADVERTISED_Asym_Pause);
                        tp->link_config.advertising |= newadv;
                }
        } else {
                int irq_sync = 0;

                if (netif_running(dev)) {
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                if (epause->autoneg)
                        tg3_flag_set(tp, PAUSE_AUTONEG);
                else
                        tg3_flag_clear(tp, PAUSE_AUTONEG);
                if (epause->rx_pause)
                        tp->link_config.flowctrl |= FLOW_CTRL_RX;
                else
                        tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
                if (epause->tx_pause)
                        tp->link_config.flowctrl |= FLOW_CTRL_TX;
                else
                        tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

                if (netif_running(dev)) {
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        err = tg3_restart_hw(tp, 1);
                        if (!err)
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);
        }

        return err;
}

static int tg3_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_TEST:
                return TG3_NUM_TEST;
        case ETH_SS_STATS:
                return TG3_NUM_STATS;
        default:
                return -EOPNOTSUPP;
        }
}

static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
                         u32 *rules __always_unused)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!tg3_flag(tp, SUPPORT_MSIX))
                return -EOPNOTSUPP;

        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
                if (netif_running(tp->dev))
                        info->data = tp->rxq_cnt;
                else {
                        info->data = num_online_cpus();
                        if (info->data > TG3_RSS_MAX_NUM_QS)
                                info->data = TG3_RSS_MAX_NUM_QS;
                }

                /* The first interrupt vector only
                 * handles link interrupts.
                 */
                info->data -= 1;
                return 0;

        default:
                return -EOPNOTSUPP;
        }
}

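/* RSS indirection table plumbing.  From user space (hypothetical interface
 * name), "ethtool -x eth0" reads the table through tg3_get_rxfh_indir()
 * and "ethtool -X eth0 equal 4" rewrites it through tg3_set_rxfh_indir().
 * Each entry maps a hash bucket to one of the rx queues counted by
 * tg3_get_rxnfc() above (the first MSI-X vector is reserved for link
 * interrupts, hence the "info->data -= 1" there).
 */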
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
        u32 size = 0;
        struct tg3 *tp = netdev_priv(dev);

        if (tg3_flag(tp, SUPPORT_MSIX))
                size = TG3_RSS_INDIR_TBL_SIZE;

        return size;
}

static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
        struct tg3 *tp = netdev_priv(dev);
        int i;

        for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
                indir[i] = tp->rss_ind_tbl[i];

        return 0;
}

static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
        struct tg3 *tp = netdev_priv(dev);
        size_t i;

        for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
                tp->rss_ind_tbl[i] = indir[i];

        if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
                return 0;

        /* It is legal to write the indirection
         * table while the device is running.
         */
        tg3_full_lock(tp, 0);
        tg3_rss_write_indir_tbl(tp);
        tg3_full_unlock(tp);

        return 0;
}

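/* Channel (queue) accounting for "ethtool -l" / "ethtool -L".  When the
 * device is down we report what a future tg3_start() would pick: the
 * user's requested count if one was stored, otherwise the kernel's
 * default RSS queue count clamped to the hardware maximum.
 */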
static void tg3_get_channels(struct net_device *dev,
                             struct ethtool_channels *channel)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 deflt_qs = netif_get_num_default_rss_queues();

        channel->max_rx = tp->rxq_max;
        channel->max_tx = tp->txq_max;

        if (netif_running(dev)) {
                channel->rx_count = tp->rxq_cnt;
                channel->tx_count = tp->txq_cnt;
        } else {
                if (tp->rxq_req)
                        channel->rx_count = tp->rxq_req;
                else
                        channel->rx_count = min(deflt_qs, tp->rxq_max);

                if (tp->txq_req)
                        channel->tx_count = tp->txq_req;
                else
                        channel->tx_count = min(deflt_qs, tp->txq_max);
        }
}

static int tg3_set_channels(struct net_device *dev,
                            struct ethtool_channels *channel)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!tg3_flag(tp, SUPPORT_MSIX))
                return -EOPNOTSUPP;

        if (channel->rx_count > tp->rxq_max ||
            channel->tx_count > tp->txq_max)
                return -EINVAL;

        tp->rxq_req = channel->rx_count;
        tp->txq_req = channel->tx_count;

        if (!netif_running(dev))
                return 0;

        tg3_stop(tp);

        netif_carrier_off(dev);

        tg3_start(tp, true, false);

        return 0;
}

static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                break;
        case ETH_SS_TEST:
                memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
                break;
        default:
                WARN_ON(1);     /* we need a WARN() */
                break;
        }
}

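/* "ethtool -p eth0 [N]" port identification.  Returning 1 for
 * ETHTOOL_ID_ACTIVE asks the ethtool core to call back with ID_ON/ID_OFF
 * once per second; the ON phase forces all three speed LEDs plus the
 * traffic LED on so the blink is visible regardless of link speed, and
 * ID_INACTIVE restores the saved LED control value.
 */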
static int tg3_set_phys_id(struct net_device *dev,
                            enum ethtool_phys_id_state state)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!netif_running(tp->dev))
                return -EAGAIN;

        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                return 1;       /* cycle on/off once per second */

        case ETHTOOL_ID_ON:
                tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                     LED_CTRL_1000MBPS_ON |
                     LED_CTRL_100MBPS_ON |
                     LED_CTRL_10MBPS_ON |
                     LED_CTRL_TRAFFIC_OVERRIDE |
                     LED_CTRL_TRAFFIC_BLINK |
                     LED_CTRL_TRAFFIC_LED);
                break;

        case ETHTOOL_ID_OFF:
                tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                     LED_CTRL_TRAFFIC_OVERRIDE);
                break;

        case ETHTOOL_ID_INACTIVE:
                tw32(MAC_LED_CTRL, tp->led_ctrl);
                break;
        }

        return 0;
}

static void tg3_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats, u64 *tmp_stats)
{
        struct tg3 *tp = netdev_priv(dev);

        if (tp->hw_stats)
                tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
        else
                memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}

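/* Read the PCI Vital Product Data block.  Preference order: an extended
 * VPD region located through the NVRAM directory, then the fixed
 * TG3_NVM_VPD_OFF window, and finally (for parts whose NVRAM does not
 * carry the EEPROM magic) the generic pci_read_vpd() path, which is
 * retried up to three times because VPD reads can time out.  The caller
 * owns the returned buffer and must kfree() it.
 */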
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
        int i;
        __be32 *buf;
        u32 offset = 0, len = 0;
        u32 magic, val;

        if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
                return NULL;

        if (magic == TG3_EEPROM_MAGIC) {
                for (offset = TG3_NVM_DIR_START;
                     offset < TG3_NVM_DIR_END;
                     offset += TG3_NVM_DIRENT_SIZE) {
                        if (tg3_nvram_read(tp, offset, &val))
                                return NULL;

                        if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
                            TG3_NVM_DIRTYPE_EXTVPD)
                                break;
                }

                if (offset != TG3_NVM_DIR_END) {
                        len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
                        if (tg3_nvram_read(tp, offset + 4, &offset))
                                return NULL;

                        offset = tg3_nvram_logical_addr(tp, offset);
                }
        }

        if (!offset || !len) {
                offset = TG3_NVM_VPD_OFF;
                len = TG3_NVM_VPD_LEN;
        }

        buf = kmalloc(len, GFP_KERNEL);
        if (buf == NULL)
                return NULL;

        if (magic == TG3_EEPROM_MAGIC) {
                for (i = 0; i < len; i += 4) {
                        /* The data is in little-endian format in NVRAM.
                         * Use the big-endian read routines to preserve
                         * the byte order as it exists in NVRAM.
                         */
                        if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
                                goto error;
                }
        } else {
                u8 *ptr;
                ssize_t cnt;
                unsigned int pos = 0;

                ptr = (u8 *)&buf[0];
                for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
                        cnt = pci_read_vpd(tp->pdev, pos,
                                           len - pos, ptr);
                        if (cnt == -ETIMEDOUT || cnt == -EINTR)
                                cnt = 0;
                        else if (cnt < 0)
                                goto error;
                }
                if (pos != len)
                        goto error;
        }

        *vpdlen = len;

        return buf;

error:
        kfree(buf);
        return NULL;
}

#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

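/* NVRAM self-test.  Three image formats are validated here: selfboot
 * format 1 images use a simple 8-bit additive checksum that must sum to
 * zero (rev 2 skips the 4-byte MBA field), hardware selfboot images carry
 * per-byte odd parity bits that are split out and checked against
 * hweight8(), and legacy images carry CRCs at 0x10 (bootstrap) and 0xfc
 * (manufacturing block) plus an optional VPD read-only checksum keyword.
 */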
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 csum, magic, len;
        __be32 *buf;
        int i, j, k, err = 0, size;

        if (tg3_flag(tp, NO_NVRAM))
                return 0;

        if (tg3_nvram_read(tp, 0, &magic) != 0)
                return -EIO;

        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
                    TG3_EEPROM_SB_FORMAT_1) {
                        switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
                        case TG3_EEPROM_SB_REVISION_0:
                                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_2:
                                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_3:
                                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_4:
                                size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_5:
                                size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_6:
                                size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
                                break;
                        default:
                                return -EIO;
                        }
                } else
                        return 0;
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                err = tg3_nvram_read_be32(tp, i, &buf[j]);
                if (err)
                        break;
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        magic = be32_to_cpu(buf[0]);
        if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
                    TG3_EEPROM_SB_REVISION_2) {
                        /* For rev 2, the csum doesn't include the MBA. */
                        for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                                csum8 += buf8[i];
                        for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                                csum8 += buf8[i];
                } else {
                        for (i = 0; i < size; i++)
                                csum8 += buf8[i];
                }

                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        } else if (i == 16) {
                                int l;
                                u8 msk;

                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        err = -EIO;

        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if (csum != le32_to_cpu(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != le32_to_cpu(buf[0xfc/4]))
                goto out;

        kfree(buf);

        buf = tg3_vpd_readblock(tp, &len);
        if (!buf)
                return -ENOMEM;

        i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
        if (i > 0) {
                j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
                if (j < 0)
                        goto out;

                if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
                        goto out;

                i += PCI_VPD_LRDT_TAG_SIZE;
                j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
                                              PCI_VPD_RO_KEYWORD_CHKSUM);
                if (j > 0) {
                        u8 csum8 = 0;

                        j += PCI_VPD_INFO_FLD_HDR_SIZE;

                        for (i = 0; i <= j; i++)
                                csum8 += ((u8 *)buf)[i];

                        if (csum8)
                                goto out;
                }
        }

        err = 0;

out:
        kfree(buf);
        return err;
}

#define TG3_SERDES_TIMEOUT_SEC  2
#define TG3_COPPER_TIMEOUT_SEC  6

static int tg3_test_link(struct tg3 *tp)
{
        int i, max;

        if (!netif_running(tp->dev))
                return -ENODEV;

        if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                max = TG3_SERDES_TIMEOUT_SEC;
        else
                max = TG3_COPPER_TIMEOUT_SEC;

        for (i = 0; i < max; i++) {
                if (netif_carrier_ok(tp->dev))
                        return 0;

                if (msleep_interruptible(1000))
                        break;
        }

        return -EIO;
}

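/* Register test strategy: for each table entry, write all-zeros and then
 * all-ones through write_mask, verifying that read-only bits (read_mask)
 * never change while read/write bits take the written value.  For example
 * (values from the table below), MAC_ADDR_0_HIGH has read_mask 0 and
 * write_mask 0x0000ffff, so after writing ones the low 16 bits must read
 * back as 0xffff and writing zero must clear them; the original register
 * value is restored afterwards.
 */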
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705, is_5750;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        static struct {
                u16 offset;
                u16 flags;
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
                u32 read_mask;
                u32 write_mask;
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        is_5705 = is_5750 = 0;
        if (tg3_flag(tp, 5705_PLUS)) {
                is_5705 = 1;
                if (tg3_flag(tp, 5750_PLUS))
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if (tg3_flag(tp, IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                tw32(offset, save_val);
        }

        return 0;

out:
        if (netif_msg_hw(tp))
                netdev_err(tp->dev,
                           "Register test failed at offset %x\n", offset);
        tw32(offset, save_val);
        return -EIO;
}

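/* Internal memory test helper: write each word of the region with three
 * patterns in turn (all zeros, all ones, and the mixed pattern 0xaa55a55a,
 * which exercises both bit polarities) and read each word back
 * immediately, failing on the first mismatch.
 */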
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
        static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
        int i;
        u32 j;

        for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
                for (j = 0; j < len; j += 4) {
                        u32 val;

                        tg3_write_mem(tp, offset + j, test_pattern[i]);
                        tg3_read_mem(tp, offset + j, &val);
                        if (val != test_pattern[i])
                                return -EIO;
                }
        }
        return 0;
}

static int tg3_test_memory(struct tg3 *tp)
{
        static struct mem_entry {
                u32 offset;
                u32 len;
        } mem_tbl_570x[] = {
                { 0x00000000, 0x00b50},
                { 0x00002000, 0x1c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5705[] = {
                { 0x00000100, 0x0000c},
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x01000},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0e000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5755[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x00800},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5906[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00400},
                { 0x00006000, 0x00400},
                { 0x00008000, 0x01000},
                { 0x00010000, 0x01000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5717[] = {
                { 0x00000200, 0x00008},
                { 0x00010000, 0x0a000},
                { 0x00020000, 0x13c00},
                { 0xffffffff, 0x00000}
        }, mem_tbl_57765[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x09800},
                { 0x00010000, 0x0a000},
                { 0xffffffff, 0x00000}
        };
        struct mem_entry *mem_tbl;
        int err = 0;
        int i;

        if (tg3_flag(tp, 5717_PLUS))
                mem_tbl = mem_tbl_5717;
        else if (tg3_flag(tp, 57765_CLASS))
                mem_tbl = mem_tbl_57765;
        else if (tg3_flag(tp, 5755_PLUS))
                mem_tbl = mem_tbl_5755;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                mem_tbl = mem_tbl_5906;
        else if (tg3_flag(tp, 5705_PLUS))
                mem_tbl = mem_tbl_5705;
        else
                mem_tbl = mem_tbl_570x;

        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
                err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
                if (err)
                        break;
        }

        return err;
}

#define TG3_TSO_MSS             500

#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

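/* Template header for the TSO loopback test, appended after the 12 MAC
 * address bytes: ethertype 0x0800, a 20-byte IPv4 header (10.0.0.1 ->
 * 10.0.0.2, protocol TCP), and a TCP header with data offset 8, i.e.
 * 20 bytes plus 12 bytes of NOP/NOP/timestamp options to match
 * TG3_TSO_TCP_OPT_LEN.  tg3_run_loopback() patches the IP total length
 * (and zeroes the TCP checksum when the hardware computes it) before
 * transmitting.
 */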
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};

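/* Run one loopback iteration: build a test frame (optionally a TSO
 * superframe that the hardware must segment into TG3_TSO_MSS-sized
 * packets), post it on the tx ring, kick the coalescing block, and poll
 * up to 350 usec for the tx consumer and rx producer indices to advance.
 * Every received segment is then checked byte-for-byte against the
 * transmitted payload pattern (i & 0xff).
 */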
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
        u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
        u32 budget;
        struct sk_buff *skb;
        u8 *tx_data, *rx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;
        struct tg3_napi *tnapi, *rnapi;
        struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

        tnapi = &tp->napi[0];
        rnapi = &tp->napi[0];
        if (tp->irq_cnt > 1) {
                if (tg3_flag(tp, ENABLE_RSS))
                        rnapi = &tp->napi[1];
                if (tg3_flag(tp, ENABLE_TSS))
                        tnapi = &tp->napi[1];
        }
        coal_now = tnapi->coal_now | rnapi->coal_now;

        err = -EIO;

        tx_len = pktsz;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;

        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);

        tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

        if (tso_loopback) {
                struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

                u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
                              TG3_TSO_TCP_OPT_LEN;

                memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
                       sizeof(tg3_tso_header));
                mss = TG3_TSO_MSS;

                val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
                num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

                /* Set the total length field in the IP header */
                iph->tot_len = htons((u16)(mss + hdr_len));

                base_flags = (TXD_FLAG_CPU_PRE_DMA |
                              TXD_FLAG_CPU_POST_DMA);

                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        struct tcphdr *th;
                        val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
                        th = (struct tcphdr *)&tx_data[val];
                        th->check = 0;
                } else
                        base_flags |= TXD_FLAG_TCPUDP_CSUM;

                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        mss |= (TG3_TSO_TCP_OPT_LEN << 9);
                } else {
                        base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
                }

                data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
        } else {
                num_pkts = 1;
                data_off = ETH_HLEN;

                if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
                    tx_len > VLAN_ETH_FRAME_LEN)
                        base_flags |= TXD_FLAG_JMB_PKT;
        }

        for (i = data_off; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }

        val = tnapi->tx_prod;
        tnapi->tx_buffers[val].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               rnapi->coal_now);

        udelay(10);

        rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

        budget = tg3_tx_avail(tnapi);
        if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
                            base_flags | TXD_FLAG_END, mss, 0)) {
                tnapi->tx_buffers[val].skb = NULL;
                dev_kfree_skb(skb);
                return -EIO;
        }

        tnapi->tx_prod++;

        /* Sync BD data before updating mailbox */
        wmb();

        tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
        tr32_mailbox(tnapi->prodmbox);

        udelay(10);

        /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
        for (i = 0; i < 35; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       coal_now);

                udelay(10);

                tx_idx = tnapi->hw_status->idx[0].tx_consumer;
                rx_idx = rnapi->hw_status->idx[0].rx_producer;
                if ((tx_idx == tnapi->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
        dev_kfree_skb(skb);

        if (tx_idx != tnapi->tx_prod)
                goto out;

        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        val = data_off;
        while (rx_idx != rx_start_idx) {
                desc = &rnapi->rx_rcb[rx_start_idx++];
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                        goto out;

                rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
                         - ETH_FCS_LEN;

                if (!tso_loopback) {
                        if (rx_len != tx_len)
                                goto out;

                        if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
                                if (opaque_key != RXD_OPAQUE_RING_STD)
                                        goto out;
                        } else {
                                if (opaque_key != RXD_OPAQUE_RING_JUMBO)
                                        goto out;
                        }
                } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                           (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                            >> RXD_TCPCSUM_SHIFT != 0xffff) {
                        goto out;
                }

                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        rx_data = tpr->rx_std_buffers[desc_idx].data;
                        map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
                                             mapping);
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        rx_data = tpr->rx_jmb_buffers[desc_idx].data;
                        map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
                                             mapping);
                } else
                        goto out;

                pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
                                            PCI_DMA_FROMDEVICE);

                rx_data += TG3_RX_OFFSET(tp);
                for (i = data_off; i < rx_len; i++, val++) {
                        if (*(rx_data + i) != (u8) (val & 0xff))
                                goto out;
                }
        }

        err = 0;

        /* tg3_free_rings will unmap and free the rx_data */
out:
        return err;
}

#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4
#define TG3_LOOPBACK_FAILED \
        (TG3_STD_LOOPBACK_FAILED | \
         TG3_JMB_LOOPBACK_FAILED | \
         TG3_TSO_LOOPBACK_FAILED)

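/* Loopback test driver.  The three u64 result slots are bitmasks of the
 * TG3_*_LOOPBACK_FAILED flags above: data[0] covers MAC loopback, data[1]
 * internal PHY loopback, and data[2] external (cable) loopback when
 * requested.  EEE is masked off for the duration of the test, and with
 * RSS enabled the indirection table is zeroed first so every test packet
 * lands on the first rx queue.
 */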
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
        int err = -EIO;
        u32 eee_cap;
        u32 jmb_pkt_sz = 9000;

        if (tp->dma_limit)
                jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

        eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
        tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

        if (!netif_running(tp->dev)) {
                data[0] = TG3_LOOPBACK_FAILED;
                data[1] = TG3_LOOPBACK_FAILED;
                if (do_extlpbk)
                        data[2] = TG3_LOOPBACK_FAILED;
                goto done;
        }

        err = tg3_reset_hw(tp, 1);
        if (err) {
                data[0] = TG3_LOOPBACK_FAILED;
                data[1] = TG3_LOOPBACK_FAILED;
                if (do_extlpbk)
                        data[2] = TG3_LOOPBACK_FAILED;
                goto done;
        }

        if (tg3_flag(tp, ENABLE_RSS)) {
                int i;

                /* Reroute all rx packets to the 1st queue */
                for (i = MAC_RSS_INDIR_TBL_0;
                     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
                        tw32(i, 0x0);
        }

        /* HW errata - mac loopback fails in some cases on 5780.
         * Normal traffic and PHY loopback are not affected by
         * errata.  Also, the MAC loopback test is deprecated for
         * all newer ASIC revisions.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT)) {
                tg3_mac_loopback(tp, true);

                if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                        data[0] |= TG3_STD_LOOPBACK_FAILED;

                if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
                        data[0] |= TG3_JMB_LOOPBACK_FAILED;

                tg3_mac_loopback(tp, false);
        }

        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            !tg3_flag(tp, USE_PHYLIB)) {
                int i;

                tg3_phy_lpbk_set(tp, 0, false);

                /* Wait for link */
                for (i = 0; i < 100; i++) {
                        if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                break;
                        mdelay(1);
                }

                if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                        data[1] |= TG3_STD_LOOPBACK_FAILED;
                if (tg3_flag(tp, TSO_CAPABLE) &&
                    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                        data[1] |= TG3_TSO_LOOPBACK_FAILED;
                if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
                        data[1] |= TG3_JMB_LOOPBACK_FAILED;

                if (do_extlpbk) {
                        tg3_phy_lpbk_set(tp, 0, true);

                        /* All link indications report up, but the hardware
                         * isn't really ready for about 20 msec.  Double it
                         * to be sure.
                         */
                        mdelay(40);

                        if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                                data[2] |= TG3_STD_LOOPBACK_FAILED;
                        if (tg3_flag(tp, TSO_CAPABLE) &&
                            tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                                data[2] |= TG3_TSO_LOOPBACK_FAILED;
                        if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                            tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
                                data[2] |= TG3_JMB_LOOPBACK_FAILED;
                }

                /* Re-enable gphy autopowerdown. */
                if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                        tg3_phy_toggle_apd(tp, true);
        }

        err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
        tp->phy_flags |= eee_cap;

        return err;
}

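/* ethtool self-test entry point ("ethtool -t eth0 [offline]").  Result
 * slots: data[0] nvram, data[1] link, data[2] registers, data[3] memory,
 * data[4..6] the three loopback masks, data[7] the interrupt test.
 * Offline testing halts the chip, so the device is fully restarted (and
 * the PHY re-attached) afterwards if the interface was running.
 */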
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

        if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
            tg3_power_up(tp)) {
                etest->flags |= ETH_TEST_FL_FAILED;
                memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
                return;
        }

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[0] = 1;
        }
        if (!doextlpbk && tg3_test_link(tp)) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[1] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, err2 = 0, irq_sync = 0;

                if (netif_running(dev)) {
                        tg3_phy_stop(tp);
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!tg3_flag(tp, 5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[2] = 1;
                }

                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[3] = 1;
                }

                if (doextlpbk)
                        etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

                if (tg3_test_loopback(tp, &data[4], doextlpbk))
                        etest->flags |= ETH_TEST_FL_FAILED;

                tg3_full_unlock(tp);

                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[7] = 1;
                }

                tg3_full_lock(tp, 0);

                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tg3_flag_set(tp, INIT_COMPLETE);
                        err2 = tg3_restart_hw(tp, 1);
                        if (!err2)
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);

                if (irq_sync && !err2)
                        tg3_phy_start(tp);
        }
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                tg3_power_down(tp);

}

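/* MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG, as issued by
 * e.g. mii-tool).  With phylib attached the request is forwarded to
 * phy_mii_ioctl(); otherwise the PHY is accessed directly under tp->lock,
 * and serdes parts without an MII PHY fall through to -EOPNOTSUPP.
 */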
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_mii_ioctl(phydev, ifr, cmd);
        }

        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = tp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&tp->lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&tp->lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}

static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
        struct tg3 *tp = netdev_priv(dev);

        memcpy(ec, &tp->coal, sizeof(*ec));
        return 0;
}

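/* Interrupt coalescing ("ethtool -C eth0 rx-usecs N ...").  Pre-5705
 * parts additionally support the *_irq and stats-block tick limits; on
 * 5705+ those maximums stay zero, so any nonzero request for them is
 * rejected with -EINVAL.  At least one of usecs/max-frames must remain
 * nonzero per direction, or the chip would never raise an interrupt.
 */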
12580 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12581 {
12582         struct tg3 *tp = netdev_priv(dev);
12583         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12584         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12585
12586         if (!tg3_flag(tp, 5705_PLUS)) {
12587                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12588                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12589                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12590                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12591         }
12592
12593         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12594             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12595             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12596             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12597             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12598             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12599             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12600             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12601             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12602             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12603                 return -EINVAL;
12604
12605         /* No rx interrupts will be generated if both are zero */
12606         if ((ec->rx_coalesce_usecs == 0) &&
12607             (ec->rx_max_coalesced_frames == 0))
12608                 return -EINVAL;
12609
12610         /* No tx interrupts will be generated if both are zero */
12611         if ((ec->tx_coalesce_usecs == 0) &&
12612             (ec->tx_max_coalesced_frames == 0))
12613                 return -EINVAL;
12614
12615         /* Only copy relevant parameters, ignore all others. */
12616         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12617         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12618         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12619         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12620         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12621         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12622         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12623         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12624         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12625
12626         if (netif_running(dev)) {
12627                 tg3_full_lock(tp, 0);
12628                 __tg3_set_coalesce(tp, &tp->coal);
12629                 tg3_full_unlock(tp);
12630         }
12631         return 0;
12632 }
12633
12634 static const struct ethtool_ops tg3_ethtool_ops = {
12635         .get_settings           = tg3_get_settings,
12636         .set_settings           = tg3_set_settings,
12637         .get_drvinfo            = tg3_get_drvinfo,
12638         .get_regs_len           = tg3_get_regs_len,
12639         .get_regs               = tg3_get_regs,
12640         .get_wol                = tg3_get_wol,
12641         .set_wol                = tg3_set_wol,
12642         .get_msglevel           = tg3_get_msglevel,
12643         .set_msglevel           = tg3_set_msglevel,
12644         .nway_reset             = tg3_nway_reset,
12645         .get_link               = ethtool_op_get_link,
12646         .get_eeprom_len         = tg3_get_eeprom_len,
12647         .get_eeprom             = tg3_get_eeprom,
12648         .set_eeprom             = tg3_set_eeprom,
12649         .get_ringparam          = tg3_get_ringparam,
12650         .set_ringparam          = tg3_set_ringparam,
12651         .get_pauseparam         = tg3_get_pauseparam,
12652         .set_pauseparam         = tg3_set_pauseparam,
12653         .self_test              = tg3_self_test,
12654         .get_strings            = tg3_get_strings,
12655         .set_phys_id            = tg3_set_phys_id,
12656         .get_ethtool_stats      = tg3_get_ethtool_stats,
12657         .get_coalesce           = tg3_get_coalesce,
12658         .set_coalesce           = tg3_set_coalesce,
12659         .get_sset_count         = tg3_get_sset_count,
12660         .get_rxnfc              = tg3_get_rxnfc,
12661         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12662         .get_rxfh_indir         = tg3_get_rxfh_indir,
12663         .set_rxfh_indir         = tg3_set_rxfh_indir,
12664         .get_channels           = tg3_get_channels,
12665         .set_channels           = tg3_set_channels,
12666         .get_ts_info            = ethtool_op_get_ts_info,
12667 };
12668
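/* Returns live counters derived from the status block while the device is
 * up; if tp->hw_stats has already been torn down (device halted), the last
 * saved snapshot in tp->net_stats_prev is returned instead.
 */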
12669 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12670                                                 struct rtnl_link_stats64 *stats)
12671 {
12672         struct tg3 *tp = netdev_priv(dev);
12673
12674         spin_lock_bh(&tp->lock);
12675         if (!tp->hw_stats) {
12676                 spin_unlock_bh(&tp->lock);
12677                 return &tp->net_stats_prev;
12678         }
12679
12680         tg3_get_nstats(tp, stats);
12681         spin_unlock_bh(&tp->lock);
12682
12683         return stats;
12684 }
12685
12686 static void tg3_set_rx_mode(struct net_device *dev)
12687 {
12688         struct tg3 *tp = netdev_priv(dev);
12689
12690         if (!netif_running(dev))
12691                 return;
12692
12693         tg3_full_lock(tp, 0);
12694         __tg3_set_rx_mode(dev);
12695         tg3_full_unlock(tp);
12696 }
12697
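/* Note (summarizing the branches below): 5780-class chips cannot use TSO
 * and jumbo frames at the same time, so TSO capability is toggled around
 * the MTU change; other chips simply enable or disable the dedicated
 * jumbo RX ring.
 */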
12698 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12699                                int new_mtu)
12700 {
12701         dev->mtu = new_mtu;
12702
12703         if (new_mtu > ETH_DATA_LEN) {
12704                 if (tg3_flag(tp, 5780_CLASS)) {
12705                         netdev_update_features(dev);
12706                         tg3_flag_clear(tp, TSO_CAPABLE);
12707                 } else {
12708                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12709                 }
12710         } else {
12711                 if (tg3_flag(tp, 5780_CLASS)) {
12712                         tg3_flag_set(tp, TSO_CAPABLE);
12713                         netdev_update_features(dev);
12714                 }
12715                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12716         }
12717 }
12718
12719 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12720 {
12721         struct tg3 *tp = netdev_priv(dev);
12722         int err, reset_phy = 0;
12723
12724         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12725                 return -EINVAL;
12726
12727         if (!netif_running(dev)) {
12728                 /* The MTU change will simply take effect the next
12729                  * time the device is brought up.
12730                  */
12731                 tg3_set_mtu(dev, tp, new_mtu);
12732                 return 0;
12733         }
12734
12735         tg3_phy_stop(tp);
12736
12737         tg3_netif_stop(tp);
12738
12739         tg3_full_lock(tp, 1);
12740
12741         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12742
12743         tg3_set_mtu(dev, tp, new_mtu);
12744
12745         /* Reset the PHY, otherwise the read DMA engine will be left in a
12746          * mode that limits all requests to 256 bytes.
12747          */
12748         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12749                 reset_phy = 1;
12750
12751         err = tg3_restart_hw(tp, reset_phy);
12752
12753         if (!err)
12754                 tg3_netif_start(tp);
12755
12756         tg3_full_unlock(tp);
12757
12758         if (!err)
12759                 tg3_phy_start(tp);
12760
12761         return err;
12762 }
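/* For orientation (illustrative): a userspace command such as
 *
 *      ip link set dev eth0 mtu 9000
 *
 * reaches tg3_change_mtu() via tg3_netdev_ops.ndo_change_mtu below.  The
 * device is halted, the MTU and ring flags are updated, and the hardware is
 * restarted so the new configuration takes effect.
 */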
12763
12764 static const struct net_device_ops tg3_netdev_ops = {
12765         .ndo_open               = tg3_open,
12766         .ndo_stop               = tg3_close,
12767         .ndo_start_xmit         = tg3_start_xmit,
12768         .ndo_get_stats64        = tg3_get_stats64,
12769         .ndo_validate_addr      = eth_validate_addr,
12770         .ndo_set_rx_mode        = tg3_set_rx_mode,
12771         .ndo_set_mac_address    = tg3_set_mac_addr,
12772         .ndo_do_ioctl           = tg3_ioctl,
12773         .ndo_tx_timeout         = tg3_tx_timeout,
12774         .ndo_change_mtu         = tg3_change_mtu,
12775         .ndo_fix_features       = tg3_fix_features,
12776         .ndo_set_features       = tg3_set_features,
12777 #ifdef CONFIG_NET_POLL_CONTROLLER
12778         .ndo_poll_controller    = tg3_poll_controller,
12779 #endif
12780 };
12781
12782 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12783 {
12784         u32 cursize, val, magic;
12785
12786         tp->nvram_size = EEPROM_CHIP_SIZE;
12787
12788         if (tg3_nvram_read(tp, 0, &magic) != 0)
12789                 return;
12790
12791         if ((magic != TG3_EEPROM_MAGIC) &&
12792             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12793             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12794                 return;
12795
12796         /*
12797          * Size the chip by reading offsets at increasing powers of two.
12798          * When we encounter our validation signature, we know the addressing
12799          * has wrapped around, and thus have our chip size.
12800          */
12801         cursize = 0x10;
12802
12803         while (cursize < tp->nvram_size) {
12804                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12805                         return;
12806
12807                 if (val == magic)
12808                         break;
12809
12810                 cursize <<= 1;
12811         }
12812
12813         tp->nvram_size = cursize;
12814 }
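/* Worked example of the probe above (hypothetical part): with a 512-byte
 * EEPROM, reads at 0x10, 0x20, 0x40, ... return non-magic data until the
 * address wraps at cursize == 0x200 and the magic from offset 0 is read
 * back, leaving tp->nvram_size == 0x200.
 */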
12815
12816 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12817 {
12818         u32 val;
12819
12820         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12821                 return;
12822
12823         /* Selfboot format */
12824         if (val != TG3_EEPROM_MAGIC) {
12825                 tg3_get_eeprom_size(tp);
12826                 return;
12827         }
12828
12829         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12830                 if (val != 0) {
12831                         /* We want to operate on the 16-bit value at
12832                          * offset 0xf2.  The tg3_nvram_read() call will
12833                          * read from NVRAM and byteswap the data according
12834                          * to the byteswapping settings for all other
12835                          * register accesses, which ensures the data we
12836                          * want always resides in the lower 16 bits.
12837                          * However, the data in NVRAM is stored in LE
12838                          * format, so the value read is always opposite
12839                          * the endianness of the CPU.  The 16-bit swab16()
12840                          * then brings the data back to CPU endianness.
12841                          */
12842                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12843                         return;
12844                 }
12845         }
12846         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12847 }
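/* Arithmetic sketch of the swab16() above (assumed value): if the low
 * 16 bits of the byteswapped read are 0x0200, swab16(0x0200) == 0x0002,
 * so tp->nvram_size becomes 2 * 1024 bytes.
 */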
12848
12849 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12850 {
12851         u32 nvcfg1;
12852
12853         nvcfg1 = tr32(NVRAM_CFG1);
12854         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12855                 tg3_flag_set(tp, FLASH);
12856         } else {
12857                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12858                 tw32(NVRAM_CFG1, nvcfg1);
12859         }
12860
12861         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12862             tg3_flag(tp, 5780_CLASS)) {
12863                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12864                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12865                         tp->nvram_jedecnum = JEDEC_ATMEL;
12866                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12867                         tg3_flag_set(tp, NVRAM_BUFFERED);
12868                         break;
12869                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12870                         tp->nvram_jedecnum = JEDEC_ATMEL;
12871                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12872                         break;
12873                 case FLASH_VENDOR_ATMEL_EEPROM:
12874                         tp->nvram_jedecnum = JEDEC_ATMEL;
12875                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12876                         tg3_flag_set(tp, NVRAM_BUFFERED);
12877                         break;
12878                 case FLASH_VENDOR_ST:
12879                         tp->nvram_jedecnum = JEDEC_ST;
12880                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12881                         tg3_flag_set(tp, NVRAM_BUFFERED);
12882                         break;
12883                 case FLASH_VENDOR_SAIFUN:
12884                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12885                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12886                         break;
12887                 case FLASH_VENDOR_SST_SMALL:
12888                 case FLASH_VENDOR_SST_LARGE:
12889                         tp->nvram_jedecnum = JEDEC_SST;
12890                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12891                         break;
12892                 }
12893         } else {
12894                 tp->nvram_jedecnum = JEDEC_ATMEL;
12895                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12896                 tg3_flag_set(tp, NVRAM_BUFFERED);
12897         }
12898 }
12899
12900 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12901 {
12902         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12903         case FLASH_5752PAGE_SIZE_256:
12904                 tp->nvram_pagesize = 256;
12905                 break;
12906         case FLASH_5752PAGE_SIZE_512:
12907                 tp->nvram_pagesize = 512;
12908                 break;
12909         case FLASH_5752PAGE_SIZE_1K:
12910                 tp->nvram_pagesize = 1024;
12911                 break;
12912         case FLASH_5752PAGE_SIZE_2K:
12913                 tp->nvram_pagesize = 2048;
12914                 break;
12915         case FLASH_5752PAGE_SIZE_4K:
12916                 tp->nvram_pagesize = 4096;
12917                 break;
12918         case FLASH_5752PAGE_SIZE_264:
12919                 tp->nvram_pagesize = 264;
12920                 break;
12921         case FLASH_5752PAGE_SIZE_528:
12922                 tp->nvram_pagesize = 528;
12923                 break;
12924         }
12925 }
12926
12927 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12928 {
12929         u32 nvcfg1;
12930
12931         nvcfg1 = tr32(NVRAM_CFG1);
12932
12933         /* NVRAM protection for TPM */
12934         if (nvcfg1 & (1 << 27))
12935                 tg3_flag_set(tp, PROTECTED_NVRAM);
12936
12937         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12938         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12939         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12940                 tp->nvram_jedecnum = JEDEC_ATMEL;
12941                 tg3_flag_set(tp, NVRAM_BUFFERED);
12942                 break;
12943         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12944                 tp->nvram_jedecnum = JEDEC_ATMEL;
12945                 tg3_flag_set(tp, NVRAM_BUFFERED);
12946                 tg3_flag_set(tp, FLASH);
12947                 break;
12948         case FLASH_5752VENDOR_ST_M45PE10:
12949         case FLASH_5752VENDOR_ST_M45PE20:
12950         case FLASH_5752VENDOR_ST_M45PE40:
12951                 tp->nvram_jedecnum = JEDEC_ST;
12952                 tg3_flag_set(tp, NVRAM_BUFFERED);
12953                 tg3_flag_set(tp, FLASH);
12954                 break;
12955         }
12956
12957         if (tg3_flag(tp, FLASH)) {
12958                 tg3_nvram_get_pagesize(tp, nvcfg1);
12959         } else {
12960                 /* For EEPROM, set the pagesize to the maximum EEPROM size. */
12961                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12962
12963                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12964                 tw32(NVRAM_CFG1, nvcfg1);
12965         }
12966 }
12967
12968 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12969 {
12970         u32 nvcfg1, protect = 0;
12971
12972         nvcfg1 = tr32(NVRAM_CFG1);
12973
12974         /* NVRAM protection for TPM */
12975         if (nvcfg1 & (1 << 27)) {
12976                 tg3_flag_set(tp, PROTECTED_NVRAM);
12977                 protect = 1;
12978         }
12979
12980         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12981         switch (nvcfg1) {
12982         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12983         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12984         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12985         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12986                 tp->nvram_jedecnum = JEDEC_ATMEL;
12987                 tg3_flag_set(tp, NVRAM_BUFFERED);
12988                 tg3_flag_set(tp, FLASH);
12989                 tp->nvram_pagesize = 264;
12990                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12991                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12992                         tp->nvram_size = (protect ? 0x3e200 :
12993                                           TG3_NVRAM_SIZE_512KB);
12994                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12995                         tp->nvram_size = (protect ? 0x1f200 :
12996                                           TG3_NVRAM_SIZE_256KB);
12997                 else
12998                         tp->nvram_size = (protect ? 0x1f200 :
12999                                           TG3_NVRAM_SIZE_128KB);
13000                 break;
13001         case FLASH_5752VENDOR_ST_M45PE10:
13002         case FLASH_5752VENDOR_ST_M45PE20:
13003         case FLASH_5752VENDOR_ST_M45PE40:
13004                 tp->nvram_jedecnum = JEDEC_ST;
13005                 tg3_flag_set(tp, NVRAM_BUFFERED);
13006                 tg3_flag_set(tp, FLASH);
13007                 tp->nvram_pagesize = 256;
13008                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13009                         tp->nvram_size = (protect ?
13010                                           TG3_NVRAM_SIZE_64KB :
13011                                           TG3_NVRAM_SIZE_128KB);
13012                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13013                         tp->nvram_size = (protect ?
13014                                           TG3_NVRAM_SIZE_64KB :
13015                                           TG3_NVRAM_SIZE_256KB);
13016                 else
13017                         tp->nvram_size = (protect ?
13018                                           TG3_NVRAM_SIZE_128KB :
13019                                           TG3_NVRAM_SIZE_512KB);
13020                 break;
13021         }
13022 }
13023
13024 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
13025 {
13026         u32 nvcfg1;
13027
13028         nvcfg1 = tr32(NVRAM_CFG1);
13029
13030         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13031         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13032         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13033         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13034         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13035                 tp->nvram_jedecnum = JEDEC_ATMEL;
13036                 tg3_flag_set(tp, NVRAM_BUFFERED);
13037                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13038
13039                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13040                 tw32(NVRAM_CFG1, nvcfg1);
13041                 break;
13042         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13043         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13044         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13045         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13046                 tp->nvram_jedecnum = JEDEC_ATMEL;
13047                 tg3_flag_set(tp, NVRAM_BUFFERED);
13048                 tg3_flag_set(tp, FLASH);
13049                 tp->nvram_pagesize = 264;
13050                 break;
13051         case FLASH_5752VENDOR_ST_M45PE10:
13052         case FLASH_5752VENDOR_ST_M45PE20:
13053         case FLASH_5752VENDOR_ST_M45PE40:
13054                 tp->nvram_jedecnum = JEDEC_ST;
13055                 tg3_flag_set(tp, NVRAM_BUFFERED);
13056                 tg3_flag_set(tp, FLASH);
13057                 tp->nvram_pagesize = 256;
13058                 break;
13059         }
13060 }
13061
13062 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
13063 {
13064         u32 nvcfg1, protect = 0;
13065
13066         nvcfg1 = tr32(NVRAM_CFG1);
13067
13068         /* NVRAM protection for TPM */
13069         if (nvcfg1 & (1 << 27)) {
13070                 tg3_flag_set(tp, PROTECTED_NVRAM);
13071                 protect = 1;
13072         }
13073
13074         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13075         switch (nvcfg1) {
13076         case FLASH_5761VENDOR_ATMEL_ADB021D:
13077         case FLASH_5761VENDOR_ATMEL_ADB041D:
13078         case FLASH_5761VENDOR_ATMEL_ADB081D:
13079         case FLASH_5761VENDOR_ATMEL_ADB161D:
13080         case FLASH_5761VENDOR_ATMEL_MDB021D:
13081         case FLASH_5761VENDOR_ATMEL_MDB041D:
13082         case FLASH_5761VENDOR_ATMEL_MDB081D:
13083         case FLASH_5761VENDOR_ATMEL_MDB161D:
13084                 tp->nvram_jedecnum = JEDEC_ATMEL;
13085                 tg3_flag_set(tp, NVRAM_BUFFERED);
13086                 tg3_flag_set(tp, FLASH);
13087                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13088                 tp->nvram_pagesize = 256;
13089                 break;
13090         case FLASH_5761VENDOR_ST_A_M45PE20:
13091         case FLASH_5761VENDOR_ST_A_M45PE40:
13092         case FLASH_5761VENDOR_ST_A_M45PE80:
13093         case FLASH_5761VENDOR_ST_A_M45PE16:
13094         case FLASH_5761VENDOR_ST_M_M45PE20:
13095         case FLASH_5761VENDOR_ST_M_M45PE40:
13096         case FLASH_5761VENDOR_ST_M_M45PE80:
13097         case FLASH_5761VENDOR_ST_M_M45PE16:
13098                 tp->nvram_jedecnum = JEDEC_ST;
13099                 tg3_flag_set(tp, NVRAM_BUFFERED);
13100                 tg3_flag_set(tp, FLASH);
13101                 tp->nvram_pagesize = 256;
13102                 break;
13103         }
13104
13105         if (protect) {
13106                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13107         } else {
13108                 switch (nvcfg1) {
13109                 case FLASH_5761VENDOR_ATMEL_ADB161D:
13110                 case FLASH_5761VENDOR_ATMEL_MDB161D:
13111                 case FLASH_5761VENDOR_ST_A_M45PE16:
13112                 case FLASH_5761VENDOR_ST_M_M45PE16:
13113                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13114                         break;
13115                 case FLASH_5761VENDOR_ATMEL_ADB081D:
13116                 case FLASH_5761VENDOR_ATMEL_MDB081D:
13117                 case FLASH_5761VENDOR_ST_A_M45PE80:
13118                 case FLASH_5761VENDOR_ST_M_M45PE80:
13119                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13120                         break;
13121                 case FLASH_5761VENDOR_ATMEL_ADB041D:
13122                 case FLASH_5761VENDOR_ATMEL_MDB041D:
13123                 case FLASH_5761VENDOR_ST_A_M45PE40:
13124                 case FLASH_5761VENDOR_ST_M_M45PE40:
13125                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13126                         break;
13127                 case FLASH_5761VENDOR_ATMEL_ADB021D:
13128                 case FLASH_5761VENDOR_ATMEL_MDB021D:
13129                 case FLASH_5761VENDOR_ST_A_M45PE20:
13130                 case FLASH_5761VENDOR_ST_M_M45PE20:
13131                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13132                         break;
13133                 }
13134         }
13135 }
13136
13137 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
13138 {
13139         tp->nvram_jedecnum = JEDEC_ATMEL;
13140         tg3_flag_set(tp, NVRAM_BUFFERED);
13141         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13142 }
13143
13144 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
13145 {
13146         u32 nvcfg1;
13147
13148         nvcfg1 = tr32(NVRAM_CFG1);
13149
13150         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13151         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13152         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13153                 tp->nvram_jedecnum = JEDEC_ATMEL;
13154                 tg3_flag_set(tp, NVRAM_BUFFERED);
13155                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13156
13157                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13158                 tw32(NVRAM_CFG1, nvcfg1);
13159                 return;
13160         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13161         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13162         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13163         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13164         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13165         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13166         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13167                 tp->nvram_jedecnum = JEDEC_ATMEL;
13168                 tg3_flag_set(tp, NVRAM_BUFFERED);
13169                 tg3_flag_set(tp, FLASH);
13170
13171                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13172                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13173                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13174                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13175                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13176                         break;
13177                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13178                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13179                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13180                         break;
13181                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13182                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13183                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13184                         break;
13185                 }
13186                 break;
13187         case FLASH_5752VENDOR_ST_M45PE10:
13188         case FLASH_5752VENDOR_ST_M45PE20:
13189         case FLASH_5752VENDOR_ST_M45PE40:
13190                 tp->nvram_jedecnum = JEDEC_ST;
13191                 tg3_flag_set(tp, NVRAM_BUFFERED);
13192                 tg3_flag_set(tp, FLASH);
13193
13194                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13195                 case FLASH_5752VENDOR_ST_M45PE10:
13196                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13197                         break;
13198                 case FLASH_5752VENDOR_ST_M45PE20:
13199                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13200                         break;
13201                 case FLASH_5752VENDOR_ST_M45PE40:
13202                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13203                         break;
13204                 }
13205                 break;
13206         default:
13207                 tg3_flag_set(tp, NO_NVRAM);
13208                 return;
13209         }
13210
13211         tg3_nvram_get_pagesize(tp, nvcfg1);
13212         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13213                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13214 }
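/* Note on the pagesize check above (and its twins in the 5717/5720 variants
 * below): 264- and 528-byte pages identify Atmel DataFlash parts, whose
 * non-power-of-two pages need the NVRAM address translation; all other page
 * sizes are addressed linearly, hence NO_NVRAM_ADDR_TRANS.
 */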
13215
13217 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
13218 {
13219         u32 nvcfg1;
13220
13221         nvcfg1 = tr32(NVRAM_CFG1);
13222
13223         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13224         case FLASH_5717VENDOR_ATMEL_EEPROM:
13225         case FLASH_5717VENDOR_MICRO_EEPROM:
13226                 tp->nvram_jedecnum = JEDEC_ATMEL;
13227                 tg3_flag_set(tp, NVRAM_BUFFERED);
13228                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13229
13230                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13231                 tw32(NVRAM_CFG1, nvcfg1);
13232                 return;
13233         case FLASH_5717VENDOR_ATMEL_MDB011D:
13234         case FLASH_5717VENDOR_ATMEL_ADB011B:
13235         case FLASH_5717VENDOR_ATMEL_ADB011D:
13236         case FLASH_5717VENDOR_ATMEL_MDB021D:
13237         case FLASH_5717VENDOR_ATMEL_ADB021B:
13238         case FLASH_5717VENDOR_ATMEL_ADB021D:
13239         case FLASH_5717VENDOR_ATMEL_45USPT:
13240                 tp->nvram_jedecnum = JEDEC_ATMEL;
13241                 tg3_flag_set(tp, NVRAM_BUFFERED);
13242                 tg3_flag_set(tp, FLASH);
13243
13244                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13245                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13246                         /* Detect size with tg3_get_nvram_size() */
13247                         break;
13248                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13249                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13250                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13251                         break;
13252                 default:
13253                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13254                         break;
13255                 }
13256                 break;
13257         case FLASH_5717VENDOR_ST_M_M25PE10:
13258         case FLASH_5717VENDOR_ST_A_M25PE10:
13259         case FLASH_5717VENDOR_ST_M_M45PE10:
13260         case FLASH_5717VENDOR_ST_A_M45PE10:
13261         case FLASH_5717VENDOR_ST_M_M25PE20:
13262         case FLASH_5717VENDOR_ST_A_M25PE20:
13263         case FLASH_5717VENDOR_ST_M_M45PE20:
13264         case FLASH_5717VENDOR_ST_A_M45PE20:
13265         case FLASH_5717VENDOR_ST_25USPT:
13266         case FLASH_5717VENDOR_ST_45USPT:
13267                 tp->nvram_jedecnum = JEDEC_ST;
13268                 tg3_flag_set(tp, NVRAM_BUFFERED);
13269                 tg3_flag_set(tp, FLASH);
13270
13271                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13272                 case FLASH_5717VENDOR_ST_M_M25PE20:
13273                 case FLASH_5717VENDOR_ST_M_M45PE20:
13274                         /* Detect size with tg3_get_nvram_size() */
13275                         break;
13276                 case FLASH_5717VENDOR_ST_A_M25PE20:
13277                 case FLASH_5717VENDOR_ST_A_M45PE20:
13278                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13279                         break;
13280                 default:
13281                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13282                         break;
13283                 }
13284                 break;
13285         default:
13286                 tg3_flag_set(tp, NO_NVRAM);
13287                 return;
13288         }
13289
13290         tg3_nvram_get_pagesize(tp, nvcfg1);
13291         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13292                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13293 }
13294
13295 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
13296 {
13297         u32 nvcfg1, nvmpinstrp;
13298
13299         nvcfg1 = tr32(NVRAM_CFG1);
13300         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13301
13302         switch (nvmpinstrp) {
13303         case FLASH_5720_EEPROM_HD:
13304         case FLASH_5720_EEPROM_LD:
13305                 tp->nvram_jedecnum = JEDEC_ATMEL;
13306                 tg3_flag_set(tp, NVRAM_BUFFERED);
13307
13308                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13309                 tw32(NVRAM_CFG1, nvcfg1);
13310                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13311                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13312                 else
13313                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13314                 return;
13315         case FLASH_5720VENDOR_M_ATMEL_DB011D:
13316         case FLASH_5720VENDOR_A_ATMEL_DB011B:
13317         case FLASH_5720VENDOR_A_ATMEL_DB011D:
13318         case FLASH_5720VENDOR_M_ATMEL_DB021D:
13319         case FLASH_5720VENDOR_A_ATMEL_DB021B:
13320         case FLASH_5720VENDOR_A_ATMEL_DB021D:
13321         case FLASH_5720VENDOR_M_ATMEL_DB041D:
13322         case FLASH_5720VENDOR_A_ATMEL_DB041B:
13323         case FLASH_5720VENDOR_A_ATMEL_DB041D:
13324         case FLASH_5720VENDOR_M_ATMEL_DB081D:
13325         case FLASH_5720VENDOR_A_ATMEL_DB081D:
13326         case FLASH_5720VENDOR_ATMEL_45USPT:
13327                 tp->nvram_jedecnum = JEDEC_ATMEL;
13328                 tg3_flag_set(tp, NVRAM_BUFFERED);
13329                 tg3_flag_set(tp, FLASH);
13330
13331                 switch (nvmpinstrp) {
13332                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13333                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13334                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13335                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13336                         break;
13337                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13338                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13339                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13340                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13341                         break;
13342                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13343                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13344                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13345                         break;
13346                 default:
13347                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13348                         break;
13349                 }
13350                 break;
13351         case FLASH_5720VENDOR_M_ST_M25PE10:
13352         case FLASH_5720VENDOR_M_ST_M45PE10:
13353         case FLASH_5720VENDOR_A_ST_M25PE10:
13354         case FLASH_5720VENDOR_A_ST_M45PE10:
13355         case FLASH_5720VENDOR_M_ST_M25PE20:
13356         case FLASH_5720VENDOR_M_ST_M45PE20:
13357         case FLASH_5720VENDOR_A_ST_M25PE20:
13358         case FLASH_5720VENDOR_A_ST_M45PE20:
13359         case FLASH_5720VENDOR_M_ST_M25PE40:
13360         case FLASH_5720VENDOR_M_ST_M45PE40:
13361         case FLASH_5720VENDOR_A_ST_M25PE40:
13362         case FLASH_5720VENDOR_A_ST_M45PE40:
13363         case FLASH_5720VENDOR_M_ST_M25PE80:
13364         case FLASH_5720VENDOR_M_ST_M45PE80:
13365         case FLASH_5720VENDOR_A_ST_M25PE80:
13366         case FLASH_5720VENDOR_A_ST_M45PE80:
13367         case FLASH_5720VENDOR_ST_25USPT:
13368         case FLASH_5720VENDOR_ST_45USPT:
13369                 tp->nvram_jedecnum = JEDEC_ST;
13370                 tg3_flag_set(tp, NVRAM_BUFFERED);
13371                 tg3_flag_set(tp, FLASH);
13372
13373                 switch (nvmpinstrp) {
13374                 case FLASH_5720VENDOR_M_ST_M25PE20:
13375                 case FLASH_5720VENDOR_M_ST_M45PE20:
13376                 case FLASH_5720VENDOR_A_ST_M25PE20:
13377                 case FLASH_5720VENDOR_A_ST_M45PE20:
13378                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13379                         break;
13380                 case FLASH_5720VENDOR_M_ST_M25PE40:
13381                 case FLASH_5720VENDOR_M_ST_M45PE40:
13382                 case FLASH_5720VENDOR_A_ST_M25PE40:
13383                 case FLASH_5720VENDOR_A_ST_M45PE40:
13384                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13385                         break;
13386                 case FLASH_5720VENDOR_M_ST_M25PE80:
13387                 case FLASH_5720VENDOR_M_ST_M45PE80:
13388                 case FLASH_5720VENDOR_A_ST_M25PE80:
13389                 case FLASH_5720VENDOR_A_ST_M45PE80:
13390                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13391                         break;
13392                 default:
13393                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13394                         break;
13395                 }
13396                 break;
13397         default:
13398                 tg3_flag_set(tp, NO_NVRAM);
13399                 return;
13400         }
13401
13402         tg3_nvram_get_pagesize(tp, nvcfg1);
13403         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13404                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13405 }
13406
13407 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13408 static void __devinit tg3_nvram_init(struct tg3 *tp)
13409 {
13410         tw32_f(GRC_EEPROM_ADDR,
13411              (EEPROM_ADDR_FSM_RESET |
13412               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13413                EEPROM_ADDR_CLKPERD_SHIFT)));
13414
13415         msleep(1);
13416
13417         /* Enable serial EEPROM accesses. */
13418         tw32_f(GRC_LOCAL_CTRL,
13419              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13420         udelay(100);
13421
13422         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13423             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13424                 tg3_flag_set(tp, NVRAM);
13425
13426                 if (tg3_nvram_lock(tp)) {
13427                         netdev_warn(tp->dev,
13428                                     "Cannot get nvram lock, %s failed\n",
13429                                     __func__);
13430                         return;
13431                 }
13432                 tg3_enable_nvram_access(tp);
13433
13434                 tp->nvram_size = 0;
13435
13436                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13437                         tg3_get_5752_nvram_info(tp);
13438                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13439                         tg3_get_5755_nvram_info(tp);
13440                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13441                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13442                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13443                         tg3_get_5787_nvram_info(tp);
13444                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13445                         tg3_get_5761_nvram_info(tp);
13446                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13447                         tg3_get_5906_nvram_info(tp);
13448                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13449                          tg3_flag(tp, 57765_CLASS))
13450                         tg3_get_57780_nvram_info(tp);
13451                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13452                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13453                         tg3_get_5717_nvram_info(tp);
13454                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13455                         tg3_get_5720_nvram_info(tp);
13456                 else
13457                         tg3_get_nvram_info(tp);
13458
13459                 if (tp->nvram_size == 0)
13460                         tg3_get_nvram_size(tp);
13461
13462                 tg3_disable_nvram_access(tp);
13463                 tg3_nvram_unlock(tp);
13464
13465         } else {
13466                 tg3_flag_clear(tp, NVRAM);
13467                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13468
13469                 tg3_get_eeprom_size(tp);
13470         }
13471 }
13472
13473 struct subsys_tbl_ent {
13474         u16 subsys_vendor, subsys_devid;
13475         u32 phy_id;
13476 };
13477
13478 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13479         /* Broadcom boards. */
13480         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13481           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13482         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13483           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13484         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13485           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13486         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13487           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13488         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13489           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13490         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13491           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13492         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13493           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13494         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13495           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13496         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13497           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13498         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13499           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13500         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13501           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13502
13503         /* 3com boards. */
13504         { TG3PCI_SUBVENDOR_ID_3COM,
13505           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13506         { TG3PCI_SUBVENDOR_ID_3COM,
13507           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13508         { TG3PCI_SUBVENDOR_ID_3COM,
13509           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13510         { TG3PCI_SUBVENDOR_ID_3COM,
13511           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13512         { TG3PCI_SUBVENDOR_ID_3COM,
13513           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13514
13515         /* DELL boards. */
13516         { TG3PCI_SUBVENDOR_ID_DELL,
13517           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13518         { TG3PCI_SUBVENDOR_ID_DELL,
13519           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13520         { TG3PCI_SUBVENDOR_ID_DELL,
13521           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13522         { TG3PCI_SUBVENDOR_ID_DELL,
13523           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13524
13525         /* Compaq boards. */
13526         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13527           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13528         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13529           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13530         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13531           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13532         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13533           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13534         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13535           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13536
13537         /* IBM boards. */
13538         { TG3PCI_SUBVENDOR_ID_IBM,
13539           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13540 };
13541
13542 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13543 {
13544         int i;
13545
13546         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13547                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13548                      tp->pdev->subsystem_vendor) &&
13549                     (subsys_id_to_phy_id[i].subsys_devid ==
13550                      tp->pdev->subsystem_device))
13551                         return &subsys_id_to_phy_id[i];
13552         }
13553         return NULL;
13554 }
13555
13556 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13557 {
13558         u32 val;
13559
13560         tp->phy_id = TG3_PHY_ID_INVALID;
13561         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13562
13563         /* Assume an onboard device and WOL capable by default.  */
13564         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13565         tg3_flag_set(tp, WOL_CAP);
13566
13567         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13568                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13569                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13570                         tg3_flag_set(tp, IS_NIC);
13571                 }
13572                 val = tr32(VCPU_CFGSHDW);
13573                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13574                         tg3_flag_set(tp, ASPM_WORKAROUND);
13575                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13576                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13577                         tg3_flag_set(tp, WOL_ENABLE);
13578                         device_set_wakeup_enable(&tp->pdev->dev, true);
13579                 }
13580                 goto done;
13581         }
13582
13583         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13584         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13585                 u32 nic_cfg, led_cfg;
13586                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13587                 int eeprom_phy_serdes = 0;
13588
13589                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13590                 tp->nic_sram_data_cfg = nic_cfg;
13591
13592                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13593                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13594                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13595                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13596                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13597                     (ver > 0) && (ver < 0x100))
13598                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13599
13600                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13601                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13602
13603                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13604                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13605                         eeprom_phy_serdes = 1;
13606
13607                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13608                 if (nic_phy_id != 0) {
13609                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13610                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13611
13612                         eeprom_phy_id  = (id1 >> 16) << 10;
13613                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13614                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13615                 } else
13616                         eeprom_phy_id = 0;
13617
13618                 tp->phy_id = eeprom_phy_id;
13619                 if (eeprom_phy_serdes) {
13620                         if (!tg3_flag(tp, 5705_PLUS))
13621                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13622                         else
13623                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13624                 }
13625
13626                 if (tg3_flag(tp, 5750_PLUS))
13627                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13628                                     SHASTA_EXT_LED_MODE_MASK);
13629                 else
13630                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13631
13632                 switch (led_cfg) {
13633                 default:
13634                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13635                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13636                         break;
13637
13638                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13639                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13640                         break;
13641
13642                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13643                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13644
13645                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
13646                          * as happens with some older 5700/5701 bootcode.
13647                          */
13648                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13649                             ASIC_REV_5700 ||
13650                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13651                             ASIC_REV_5701)
13652                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13653
13654                         break;
13655
13656                 case SHASTA_EXT_LED_SHARED:
13657                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13658                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13659                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13660                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13661                                                  LED_CTRL_MODE_PHY_2);
13662                         break;
13663
13664                 case SHASTA_EXT_LED_MAC:
13665                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13666                         break;
13667
13668                 case SHASTA_EXT_LED_COMBO:
13669                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13670                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13671                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13672                                                  LED_CTRL_MODE_PHY_2);
13673                         break;
13674
13675                 }
13676
13677                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13678                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13679                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13680                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13681
13682                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13683                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13684
13685                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13686                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13687                         if ((tp->pdev->subsystem_vendor ==
13688                              PCI_VENDOR_ID_ARIMA) &&
13689                             (tp->pdev->subsystem_device == 0x205a ||
13690                              tp->pdev->subsystem_device == 0x2063))
13691                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13692                 } else {
13693                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13694                         tg3_flag_set(tp, IS_NIC);
13695                 }
13696
13697                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13698                         tg3_flag_set(tp, ENABLE_ASF);
13699                         if (tg3_flag(tp, 5750_PLUS))
13700                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13701                 }
13702
13703                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13704                     tg3_flag(tp, 5750_PLUS))
13705                         tg3_flag_set(tp, ENABLE_APE);
13706
13707                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13708                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13709                         tg3_flag_clear(tp, WOL_CAP);
13710
13711                 if (tg3_flag(tp, WOL_CAP) &&
13712                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13713                         tg3_flag_set(tp, WOL_ENABLE);
13714                         device_set_wakeup_enable(&tp->pdev->dev, true);
13715                 }
13716
13717                 if (cfg2 & (1 << 17))
13718                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13719
13720                 /* SerDes signal pre-emphasis in register 0x590 is set by
13721                  * the bootcode if bit 18 is set. */
13722                 if (cfg2 & (1 << 18))
13723                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13724
13725                 if ((tg3_flag(tp, 57765_PLUS) ||
13726                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13727                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13728                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13729                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13730
13731                 if (tg3_flag(tp, PCI_EXPRESS) &&
13732                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13733                     !tg3_flag(tp, 57765_PLUS)) {
13734                         u32 cfg3;
13735
13736                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13737                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13738                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13739                 }
13740
13741                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13742                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13743                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13744                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13745                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13746                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13747         }
13748 done:
13749         if (tg3_flag(tp, WOL_CAP))
13750                 device_set_wakeup_enable(&tp->pdev->dev,
13751                                          tg3_flag(tp, WOL_ENABLE));
13752         else
13753                 device_set_wakeup_capable(&tp->pdev->dev, false);
13754 }
13755
13756 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13757 {
13758         int i;
13759         u32 val;
13760
13761         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13762         tw32(OTP_CTRL, cmd);
13763
13764         /* Wait for up to 1 ms for command to execute. */
13765         for (i = 0; i < 100; i++) {
13766                 val = tr32(OTP_STATUS);
13767                 if (val & OTP_STATUS_CMD_DONE)
13768                         break;
13769                 udelay(10);
13770         }
13771
13772         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13773 }
13774
13775 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13776  * configuration is a 32-bit value that straddles the alignment boundary.
13777  * We do two 32-bit reads and then shift and merge the results.
13778  */
13779 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13780 {
13781         u32 bhalf_otp, thalf_otp;
13782
13783         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13784
13785         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13786                 return 0;
13787
13788         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13789
13790         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13791                 return 0;
13792
13793         thalf_otp = tr32(OTP_READ_DATA);
13794
13795         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13796
13797         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13798                 return 0;
13799
13800         bhalf_otp = tr32(OTP_READ_DATA);
13801
13802         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13803 }
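/* Worked example of the merge above (assumed register values): with
 * thalf_otp == 0xAAAABBBB and bhalf_otp == 0xCCCCDDDD the result is
 * (0xBBBB << 16) | 0xCCCC == 0xBBBBCCCC, i.e. the low half of the first
 * read concatenated with the high half of the second.
 */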
13804
13805 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13806 {
13807         u32 adv = ADVERTISED_Autoneg;
13808
13809         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13810                 adv |= ADVERTISED_1000baseT_Half |
13811                        ADVERTISED_1000baseT_Full;
13812
13813         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13814                 adv |= ADVERTISED_100baseT_Half |
13815                        ADVERTISED_100baseT_Full |
13816                        ADVERTISED_10baseT_Half |
13817                        ADVERTISED_10baseT_Full |
13818                        ADVERTISED_TP;
13819         else
13820                 adv |= ADVERTISED_FIBRE;
13821
13822         tp->link_config.advertising = adv;
13823         tp->link_config.speed = SPEED_UNKNOWN;
13824         tp->link_config.duplex = DUPLEX_UNKNOWN;
13825         tp->link_config.autoneg = AUTONEG_ENABLE;
13826         tp->link_config.active_speed = SPEED_UNKNOWN;
13827         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13828
13829         tp->old_link = -1;
13830 }
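/* For reference, derived from the branches above: a copper PHY with no
 * speed restriction ends up advertising Autoneg plus 10/100/1000 at both
 * half and full duplex over TP; a serdes PHY advertises Autoneg, the
 * 1000 half/full modes and FIBRE instead.
 */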
13831
13832 static int __devinit tg3_phy_probe(struct tg3 *tp)
13833 {
13834         u32 hw_phy_id_1, hw_phy_id_2;
13835         u32 hw_phy_id, hw_phy_id_masked;
13836         int err;
13837
13838         /* flow control autonegotiation is default behavior */
13839         tg3_flag_set(tp, PAUSE_AUTONEG);
13840         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13841
13842         if (tg3_flag(tp, ENABLE_APE)) {
13843                 switch (tp->pci_fn) {
13844                 case 0:
13845                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
13846                         break;
13847                 case 1:
13848                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
13849                         break;
13850                 case 2:
13851                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
13852                         break;
13853                 case 3:
13854                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
13855                         break;
13856                 }
13857         }
13858
13859         if (tg3_flag(tp, USE_PHYLIB))
13860                 return tg3_phy_init(tp);
13861
13862         /* Reading the PHY ID register can conflict with ASF
13863          * firmware access to the PHY hardware.
13864          */
13865         err = 0;
13866         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13867                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13868         } else {
13869                 /* Now read the physical PHY_ID from the chip and verify
13870                  * that it is sane.  If it doesn't look good, we fall back
13871                  * to the PHY_ID found in the eeprom area and, failing
13872                  * that, the hard-coded subsystem-ID table.
13873                  */
13874                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13875                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13876
13877                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13878                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13879                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
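                /* e.g. (values assumed for illustration): PHYSID1 == 0x0020
                 * and PHYSID2 == 0x6110 pack to (0x0020 << 10) |
                 * ((0x6110 & 0xfc00) << 16) | (0x6110 & 0x03ff)
                 * == 0x60008110, which matches TG3_PHY_ID_BCM5701.
                 */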
13880
13881                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13882         }
13883
13884         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13885                 tp->phy_id = hw_phy_id;
13886                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13887                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13888                 else
13889                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13890         } else {
13891                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13892                         /* Do nothing, phy ID already set up in
13893                          * tg3_get_eeprom_hw_cfg().
13894                          */
13895                 } else {
13896                         struct subsys_tbl_ent *p;
13897
13898                         /* No eeprom signature?  Try the hardcoded
13899                          * subsys device table.
13900                          */
13901                         p = tg3_lookup_by_subsys(tp);
13902                         if (!p)
13903                                 return -ENODEV;
13904
13905                         tp->phy_id = p->phy_id;
13906                         if (!tp->phy_id ||
13907                             tp->phy_id == TG3_PHY_ID_BCM8002)
13908                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13909                 }
13910         }
13911
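        /* Per the condition below, EEE capability is claimed only for
         * non-serdes 5719 and 5720 parts, 5718 devices past the 5717 A0
         * stepping, and 57765 parts past their A0 stepping.
         */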
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
             (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
              tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
              tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
                tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

        tg3_phy_init_link_config(tp);

        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            !tg3_flag(tp, ENABLE_APE) &&
            !tg3_flag(tp, ENABLE_ASF)) {
                u32 bmsr, dummy;

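                /* BMSR latches link-down events (latched-low per IEEE
                 * 802.3 clause 22), so it is read twice: the first read
                 * clears any stale latch, the second reflects the
                 * current link state.
                 */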
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        goto skip_phy_reset;

                err = tg3_phy_reset(tp);
                if (err)
                        return err;

                tg3_phy_set_wirespeed(tp);

                if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
                        tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
                                            tp->link_config.flowctrl);

                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);
                }
        }

skip_phy_reset:
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                err = tg3_init_5401phy_dsp(tp);
                if (err)
                        return err;

                err = tg3_init_5401phy_dsp(tp);
        }

        return err;
}

static void __devinit tg3_read_vpd(struct tg3 *tp)
{
        u8 *vpd_data;
        unsigned int block_end, rosize, len;
        u32 vpdlen;
        int j, i = 0;

        vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
        if (!vpd_data)
                goto out_no_vpd;

        i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto out_not_found;

        rosize = pci_vpd_lrdt_size(&vpd_data[i]);
        block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
        i += PCI_VPD_LRDT_TAG_SIZE;

        if (block_end > vpdlen)
                goto out_not_found;

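        /* "1028" is Dell's PCI vendor ID (0x1028) spelled out in ASCII.
         * On boards that carry it in the MFR_ID keyword, the VENDOR0
         * ("V0") keyword holds a vendor firmware version string, which
         * seeds fw_ver before the bootcode version is appended.
         */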
        j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_MFR_ID);
        if (j > 0) {
                len = pci_vpd_info_field_size(&vpd_data[j]);

                j += PCI_VPD_INFO_FLD_HDR_SIZE;
                if (j + len > block_end || len != 4 ||
                    memcmp(&vpd_data[j], "1028", 4))
                        goto partno;

                j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                              PCI_VPD_RO_KEYWORD_VENDOR0);
                if (j < 0)
                        goto partno;

                len = pci_vpd_info_field_size(&vpd_data[j]);

                j += PCI_VPD_INFO_FLD_HDR_SIZE;
                if (j + len > block_end || len >= TG3_VER_SIZE)
                        goto partno;

                memcpy(tp->fw_ver, &vpd_data[j], len);
                /* Bound the append by the destination buffer, not by
                 * the VPD block length.
                 */
                strncat(tp->fw_ver, " bc ", TG3_VER_SIZE - len - 1);
        }

partno:
        i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_PARTNO);
        if (i < 0)
                goto out_not_found;

        len = pci_vpd_info_field_size(&vpd_data[i]);

        i += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (len > TG3_BPN_SIZE ||
            (len + i) > vpdlen)
                goto out_not_found;

        memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
        kfree(vpd_data);
        if (tp->board_part_number[0])
                return;

out_no_vpd:
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
                        strcpy(tp->board_part_number, "BCM5717");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
                        strcpy(tp->board_part_number, "BCM5718");
                else
                        goto nomatch;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
                        strcpy(tp->board_part_number, "BCM57780");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
                        strcpy(tp->board_part_number, "BCM57760");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
                        strcpy(tp->board_part_number, "BCM57790");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
                        strcpy(tp->board_part_number, "BCM57788");
                else
                        goto nomatch;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
                        strcpy(tp->board_part_number, "BCM57761");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
                        strcpy(tp->board_part_number, "BCM57765");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
                        strcpy(tp->board_part_number, "BCM57781");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
                        strcpy(tp->board_part_number, "BCM57785");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
                        strcpy(tp->board_part_number, "BCM57791");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
                        strcpy(tp->board_part_number, "BCM57795");
                else
                        goto nomatch;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
                        strcpy(tp->board_part_number, "BCM57762");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
                        strcpy(tp->board_part_number, "BCM57766");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
                        strcpy(tp->board_part_number, "BCM57782");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
                        strcpy(tp->board_part_number, "BCM57786");
                else
                        goto nomatch;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                strcpy(tp->board_part_number, "BCM95906");
        } else {
nomatch:
                strcpy(tp->board_part_number, "none");
        }
}

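/* A firmware directory entry is taken to be valid when its first word
 * carries the 0x0c000000 signature in the top six bits and its second
 * word is zero; anything else is rejected.
 */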
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
        u32 val;

        if (tg3_nvram_read(tp, offset, &val) ||
            (val & 0xfc000000) != 0x0c000000 ||
            tg3_nvram_read(tp, offset + 4, &val) ||
            val != 0)
                return 0;

        return 1;
}

static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
        u32 val, offset, start, ver_offset;
        int i, dst_off;
        bool newver = false;

        if (tg3_nvram_read(tp, 0xc, &offset) ||
            tg3_nvram_read(tp, 0x4, &start))
                return;

        offset = tg3_nvram_logical_addr(tp, offset);

        if (tg3_nvram_read(tp, offset, &val))
                return;

        if ((val & 0xfc000000) == 0x0c000000) {
                if (tg3_nvram_read(tp, offset + 4, &val))
                        return;

                if (val == 0)
                        newver = true;
        }

        dst_off = strlen(tp->fw_ver);

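        /* New-format images store a 16-byte, vendor-formatted version
         * string at a pointer found in the image header; legacy images
         * pack major/minor numbers into the BCVER directory word, which
         * is rendered as "vM.mm" below.
         */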
        if (newver) {
                if (TG3_VER_SIZE - dst_off < 16 ||
                    tg3_nvram_read(tp, offset + 8, &ver_offset))
                        return;

                offset = offset + ver_offset - start;
                for (i = 0; i < 16; i += 4) {
                        __be32 v;
                        if (tg3_nvram_read_be32(tp, offset + i, &v))
                                return;

                        memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
                }
        } else {
                u32 major, minor;

                if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
                        return;

                major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
                        TG3_NVM_BCVER_MAJSFT;
                minor = ver_offset & TG3_NVM_BCVER_MINMSK;
                snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
                         "v%d.%02d", major, minor);
        }
}

static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
        u32 val, major, minor;

        /* Use native endian representation */
        if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
                return;

        major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
                TG3_NVM_HWSB_CFG1_MAJSFT;
        minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
                TG3_NVM_HWSB_CFG1_MINSFT;

        snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
}

static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
        u32 offset, major, minor, build;

        strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

        if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
                return;

        switch (val & TG3_EEPROM_SB_REVISION_MASK) {
        case TG3_EEPROM_SB_REVISION_0:
                offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_2:
                offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_3:
                offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_4:
                offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_5:
                offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_6:
                offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
                break;
        default:
                return;
        }

        if (tg3_nvram_read(tp, offset, &val))
                return;

        build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
                TG3_EEPROM_SB_EDH_BLD_SHFT;
        major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
                TG3_EEPROM_SB_EDH_MAJ_SHFT;
        minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

        if (minor > 99 || build > 26)
                return;

        offset = strlen(tp->fw_ver);
        snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
                 " v%d.%02d", major, minor);

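        /* Builds 1 through 26 (guarded above) map to a single suffix
         * letter 'a' through 'z', so build 2 of v1.04 would read
         * "sb v1.04b".
         */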
        if (build > 0) {
                offset = strlen(tp->fw_ver);
                if (offset < TG3_VER_SIZE - 1)
                        tp->fw_ver[offset] = 'a' + build - 1;
        }
}

static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
        u32 val, offset, start;
        int i, vlen;

        for (offset = TG3_NVM_DIR_START;
             offset < TG3_NVM_DIR_END;
             offset += TG3_NVM_DIRENT_SIZE) {
                if (tg3_nvram_read(tp, offset, &val))
                        return;

                if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
                        break;
        }

        if (offset == TG3_NVM_DIR_END)
                return;

        if (!tg3_flag(tp, 5705_PLUS))
                start = 0x08000000;
        else if (tg3_nvram_read(tp, offset - 4, &start))
                return;

        if (tg3_nvram_read(tp, offset + 4, &offset) ||
            !tg3_fw_img_is_valid(tp, offset) ||
            tg3_nvram_read(tp, offset + 8, &val))
                return;

        offset += val - start;

        vlen = strlen(tp->fw_ver);

        /* Leave room for the ", " separator plus a terminating NUL. */
        if (vlen >= TG3_VER_SIZE - 2)
                return;

        tp->fw_ver[vlen++] = ',';
        tp->fw_ver[vlen++] = ' ';

        for (i = 0; i < 4; i++) {
                __be32 v;
                if (tg3_nvram_read_be32(tp, offset, &v))
                        return;

                offset += sizeof(v);

                if (vlen > TG3_VER_SIZE - sizeof(v)) {
                        memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
                        break;
                }

                memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
                vlen += sizeof(v);
        }
}

static void __devinit tg3_probe_ncsi(struct tg3 *tp)
{
        u32 apedata;

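        /* Trust the APE only after a two-step handshake: its shared
         * memory segment must carry the signature magic, and its
         * firmware must report ready, before the feature register is
         * consulted.
         */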
        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
                tg3_flag_set(tp, APE_HAS_NCSI);
}

static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
        int vlen;
        u32 apedata;
        char *fwtype;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

        if (tg3_flag(tp, APE_HAS_NCSI))
                fwtype = "NCSI";
        else
                fwtype = "DASH";

        vlen = strlen(tp->fw_ver);

        snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
                 fwtype,
                 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
                 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
                 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
                 (apedata & APE_FW_VERSION_BLDMSK));
}

static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
        u32 val;
        bool vpd_vers = false;

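        /* fw_ver is assembled piecewise by the helpers above; a board
         * might end up with something like "5720-v1.39 NCSI v1.2.28.0"
         * (hypothetical values), depending on which firmware images are
         * present.
         */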
        if (tp->fw_ver[0] != 0)
                vpd_vers = true;

        if (tg3_flag(tp, NO_NVRAM)) {
                strcat(tp->fw_ver, "sb");
                return;
        }

        if (tg3_nvram_read(tp, 0, &val))
                return;

        if (val == TG3_EEPROM_MAGIC)
                tg3_read_bc_ver(tp);
        else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
                tg3_read_sb_ver(tp, val);
        else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                tg3_read_hwsb_ver(tp);

        if (tg3_flag(tp, ENABLE_ASF)) {
                if (tg3_flag(tp, ENABLE_APE)) {
                        tg3_probe_ncsi(tp);
                        if (!vpd_vers)
                                tg3_read_dash_ver(tp);
                } else if (!vpd_vers) {
                        tg3_read_mgmtfw_ver(tp);
                }
        }

        tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
        if (tg3_flag(tp, LRG_PROD_RING_CAP))
                return TG3_RX_RET_MAX_SIZE_5717;
        else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
                return TG3_RX_RET_MAX_SIZE_5700;
        else
                return TG3_RX_RET_MAX_SIZE_5705;
}

static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
        { },
};

static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
        struct pci_dev *peer;
        unsigned int func, devnr = tp->pdev->devfn & ~7;

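        /* PCI encodes devfn as (device << 3) | function, so masking off
         * the low three bits yields function 0 of this device; the loop
         * then probes all eight possible functions in the same slot.
         */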
        for (func = 0; func < 8; func++) {
                peer = pci_get_slot(tp->pdev->bus, devnr | func);
                if (peer && peer != tp->pdev)
                        break;
                pci_dev_put(peer);
        }
        /* 5704 can be configured in single-port mode, set peer to
         * tp->pdev in that case.
         */
        if (!peer) {
                peer = tp->pdev;
                return peer;
        }

        /*
         * We don't need to keep the refcount elevated; there's no way
         * to remove one half of this device without removing the other
         */
        pci_dev_put(peer);

        return peer;
}

static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
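        /* The chip ID packs the ASIC revision and stepping: the
         * GET_ASIC_REV()/GET_CHIP_REV() helpers keep (id >> 12) and
         * (id >> 8) respectively.  As a worked example with the
         * historical 16-bit encoding, a hypothetical ID of 0x7000
         * would decode to ASIC rev 0x07, chip rev 0x70, stepping 0.
         */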
        tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
                u32 reg;

                /* All devices that use the alternate
                 * ASIC REV location have a CPMU.
                 */
                tg3_flag_set(tp, CPMU_PRESENT);

                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
                        reg = TG3PCI_GEN2_PRODID_ASICREV;
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
                        reg = TG3PCI_GEN15_PRODID_ASICREV;
                else
                        reg = TG3PCI_PRODID_ASICREV;

                pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
        }

        /* Wrong chip ID in 5752 A0. This code can be removed later
         * as A0 is not in production.
         */
        if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
                tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                tg3_flag_set(tp, 5717_PLUS);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
                tg3_flag_set(tp, 57765_CLASS);

        if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
                tg3_flag_set(tp, 57765_PLUS);

        /* Intentionally exclude ASIC_REV_5906 */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
            tg3_flag(tp, 57765_PLUS))
                tg3_flag_set(tp, 5755_PLUS);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
                tg3_flag_set(tp, 5780_CLASS);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
            tg3_flag(tp, 5755_PLUS) ||
            tg3_flag(tp, 5780_CLASS))
                tg3_flag_set(tp, 5750_PLUS);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
            tg3_flag(tp, 5750_PLUS))
                tg3_flag_set(tp, 5705_PLUS);
}

static int __devinit tg3_get_invariants(struct tg3 *tp)
{
        u32 misc_ctrl_reg;
        u32 pci_state_reg, grc_misc_cfg;
        u32 val;
        u16 pci_cmd;
        int err;

        /* Force memory write invalidate off.  If we leave it on,
         * then on 5700_BX chips we have to enable a workaround.
         * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
         * to match the cacheline size.  The Broadcom driver has this
         * workaround but turns MWI off all the time, so it never
         * takes effect.  This seems to suggest that the workaround
         * is insufficient.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_cmd &= ~PCI_COMMAND_INVALIDATE;
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        /* Important! -- Make sure register accesses are byteswapped
         * correctly.  Also, for those chips that require it, make
         * sure that indirect register accesses are enabled before
         * the first operation.
         */
        pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                              &misc_ctrl_reg);
        tp->misc_host_ctrl |= (misc_ctrl_reg &
                               MISC_HOST_CTRL_CHIPREV);
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        tg3_detect_asic_rev(tp, misc_ctrl_reg);

        /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
         * we need to disable memory and use config. cycles
         * only to access all registers. The 5702/03 chips
         * can mistakenly decode the special cycles from the
         * ICH chipsets as memory write cycles, causing corruption
         * of register and memory space. Only certain ICH bridges
         * will drive special cycles with non-zero data during the
         * address phase which can fall within the 5703's address
         * range. This is not an ICH bug as the PCI spec allows
         * non-zero address during special cycles. However, only
         * these ICH bridges are known to drive non-zero addresses
         * during special cycles.
         *
         * Since special cycles do not cross PCI bridges, we only
         * enable this workaround if the 5703 is on the secondary
         * bus of these ICH bridges.
         */
        if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
            (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
                static struct tg3_dev_id {
                        u32     vendor;
                        u32     device;
                        u32     rev;
                } ich_chipsets[] = {
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
                          PCI_ANY_ID },
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
                          PCI_ANY_ID },
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
                          0xa },
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
                          PCI_ANY_ID },
                        { },
                };
                struct tg3_dev_id *pci_id = &ich_chipsets[0];
                struct pci_dev *bridge = NULL;

                while (pci_id->vendor != 0) {
                        bridge = pci_get_device(pci_id->vendor, pci_id->device,
                                                bridge);
                        if (!bridge) {
                                pci_id++;
                                continue;
                        }
                        if (pci_id->rev != PCI_ANY_ID) {
                                if (bridge->revision > pci_id->rev)
                                        continue;
                        }
                        if (bridge->subordinate &&
                            (bridge->subordinate->number ==
                             tp->pdev->bus->number)) {
                                tg3_flag_set(tp, ICH_WORKAROUND);
                                pci_dev_put(bridge);
                                break;
                        }
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                static struct tg3_dev_id {
                        u32     vendor;
                        u32     device;
                } bridge_chipsets[] = {
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
                        { },
                };
                struct tg3_dev_id *pci_id = &bridge_chipsets[0];
                struct pci_dev *bridge = NULL;

                while (pci_id->vendor != 0) {
                        bridge = pci_get_device(pci_id->vendor,
                                                pci_id->device,
                                                bridge);
                        if (!bridge) {
                                pci_id++;
                                continue;
                        }
                        if (bridge->subordinate &&
                            (bridge->subordinate->number <=
                             tp->pdev->bus->number) &&
                            (bridge->subordinate->busn_res.end >=
                             tp->pdev->bus->number)) {
                                tg3_flag_set(tp, 5701_DMA_BUG);
                                pci_dev_put(bridge);
                                break;
                        }
                }
        }

        /* The EPB bridge inside 5714, 5715, and 5780 cannot support
         * DMA addresses wider than 40 bits. This bridge may have
         * additional 57xx devices behind it in some 4-port NIC designs,
         * for example. Any tg3 device found behind the bridge will also
         * need the 40-bit DMA workaround.
         */
        if (tg3_flag(tp, 5780_CLASS)) {
                tg3_flag_set(tp, 40BIT_DMA_BUG);
                tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
        } else {
                struct pci_dev *bridge = NULL;

                do {
                        bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
                                                PCI_DEVICE_ID_SERVERWORKS_EPB,
                                                bridge);
                        if (bridge && bridge->subordinate &&
                            (bridge->subordinate->number <=
                             tp->pdev->bus->number) &&
                            (bridge->subordinate->busn_res.end >=
                             tp->pdev->bus->number)) {
                                tg3_flag_set(tp, 40BIT_DMA_BUG);
                                pci_dev_put(bridge);
                                break;
                        }
                } while (bridge);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
                tp->pdev_peer = tg3_find_peer(tp);

        /* Determine TSO capabilities */
        if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
                ; /* Do nothing. HW bug. */
        else if (tg3_flag(tp, 57765_PLUS))
                tg3_flag_set(tp, HW_TSO_3);
        else if (tg3_flag(tp, 5755_PLUS) ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tg3_flag_set(tp, HW_TSO_2);
        else if (tg3_flag(tp, 5750_PLUS)) {
                tg3_flag_set(tp, HW_TSO_1);
                tg3_flag_set(tp, TSO_BUG);
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
                    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
                        tg3_flag_clear(tp, TSO_BUG);
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
                   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
                tg3_flag_set(tp, TSO_BUG);
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
                        tp->fw_needed = FIRMWARE_TG3TSO5;
                else
                        tp->fw_needed = FIRMWARE_TG3TSO;
        }

        /* Selectively allow TSO based on operating conditions */
        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3) ||
            tp->fw_needed) {
                /* For firmware TSO, assume ASF is disabled.
                 * We'll disable TSO later if we discover ASF
                 * is enabled in tg3_get_eeprom_hw_cfg().
                 */
                tg3_flag_set(tp, TSO_CAPABLE);
        } else {
                tg3_flag_clear(tp, TSO_CAPABLE);
                tg3_flag_clear(tp, TSO_BUG);
                tp->fw_needed = NULL;
        }

        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
                tp->fw_needed = FIRMWARE_TG3;

        tp->irq_max = 1;

        if (tg3_flag(tp, 5750_PLUS)) {
                tg3_flag_set(tp, SUPPORT_MSI);
                if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
                    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
                     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
                     tp->pdev_peer == tp->pdev))
                        tg3_flag_clear(tp, SUPPORT_MSI);

                if (tg3_flag(tp, 5755_PLUS) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        tg3_flag_set(tp, 1SHOT_MSI);
                }

                if (tg3_flag(tp, 57765_PLUS)) {
                        tg3_flag_set(tp, SUPPORT_MSIX);
                        tp->irq_max = TG3_IRQ_MAX_VECS;
                }
        }

        tp->txq_max = 1;
        tp->rxq_max = 1;
        if (tp->irq_max > 1) {
                tp->rxq_max = TG3_RSS_MAX_NUM_QS;
                tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                        tp->txq_max = tp->irq_max - 1;
        }

        if (tg3_flag(tp, 5755_PLUS) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tg3_flag_set(tp, SHORT_DMA_BUG);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
                tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                tg3_flag_set(tp, LRG_PROD_RING_CAP);

        if (tg3_flag(tp, 57765_PLUS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
                tg3_flag_set(tp, USE_JUMBO_BDFLAG);

        if (!tg3_flag(tp, 5705_PLUS) ||
            tg3_flag(tp, 5780_CLASS) ||
            tg3_flag(tp, USE_JUMBO_BDFLAG))
                tg3_flag_set(tp, JUMBO_CAPABLE);

        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
                              &pci_state_reg);

        if (pci_is_pcie(tp->pdev)) {
                u16 lnkctl;

                tg3_flag_set(tp, PCI_EXPRESS);

                pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
                if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5906) {
                                tg3_flag_clear(tp, HW_TSO_2);
                                tg3_flag_clear(tp, TSO_CAPABLE);
                        }
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
                                tg3_flag_set(tp, CLKREQ_BUG);
                } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
                        tg3_flag_set(tp, L1PLLPD_EN);
                }
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                /* BCM5785 devices are effectively PCIe devices, and should
                 * follow PCIe codepaths, but do not have a PCIe capabilities
                 * section.
                 */
                tg3_flag_set(tp, PCI_EXPRESS);
        } else if (!tg3_flag(tp, 5705_PLUS) ||
                   tg3_flag(tp, 5780_CLASS)) {
                tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
                if (!tp->pcix_cap) {
                        dev_err(&tp->pdev->dev,
                                "Cannot find PCI-X capability, aborting\n");
                        return -EIO;
                }

                if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
                        tg3_flag_set(tp, PCIX_MODE);
        }

        /* If we have an AMD 762 or VIA K8T800 chipset, write
         * reordering to the mailbox registers done by the host
         * controller can cause major troubles.  We read back from
         * every mailbox register write to force the writes to be
         * posted to the chip in order.
         */
        if (pci_dev_present(tg3_write_reorder_chipsets) &&
            !tg3_flag(tp, PCI_EXPRESS))
                tg3_flag_set(tp, MBOX_WRITE_REORDER);

        pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                             &tp->pci_cacheline_sz);
        pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                             &tp->pci_lat_timer);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
            tp->pci_lat_timer < 64) {
                tp->pci_lat_timer = 64;
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Important! -- It is critical that the PCI-X hw workaround
         * situation is decided before the first MMIO register access.
         */
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
                /* 5700 BX chips need to have their TX producer index
                 * mailboxes written twice to work around a bug.
                 */
                tg3_flag_set(tp, TXD_MBOX_HWBUG);

                /* If we are in PCI-X mode, enable register write workaround.
                 *
                 * The workaround is to use indirect register accesses
                 * for all chip writes not to mailbox registers.
                 */
                if (tg3_flag(tp, PCIX_MODE)) {
                        u32 pm_reg;

                        tg3_flag_set(tp, PCIX_TARGET_HWBUG);

                        /* The chip can have its power management PCI config
                         * space registers clobbered due to this bug, so
                         * explicitly force the chip into D0 here.
                         */
                        pci_read_config_dword(tp->pdev,
                                              tp->pm_cap + PCI_PM_CTRL,
                                              &pm_reg);
                        pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
                        pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
                        pci_write_config_dword(tp->pdev,
                                               tp->pm_cap + PCI_PM_CTRL,
                                               pm_reg);

                        /* Also, force SERR#/PERR# in PCI command. */
                        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
                        pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
                        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
                }
        }

        if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
                tg3_flag_set(tp, PCI_HIGH_SPEED);
        if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
                tg3_flag_set(tp, PCI_32BIT);

        /* Chip-specific fixup from Broadcom driver */
        if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
            (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
                pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
                pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
        }

        /* Default fast path register access methods */
        tp->read32 = tg3_read32;
        tp->write32 = tg3_write32;
        tp->read32_mbox = tg3_read32;
        tp->write32_mbox = tg3_write32;
        tp->write32_tx_mbox = tg3_write32;
        tp->write32_rx_mbox = tg3_write32;

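        /* The tr32()/tw32() accessor macros dispatch through these
         * function pointers, so the bus-quirk handling chosen below is
         * decided once at probe time rather than on every register
         * access.
         */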
        /* Various workaround register access methods */
        if (tg3_flag(tp, PCIX_TARGET_HWBUG))
                tp->write32 = tg3_write_indirect_reg32;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
                 (tg3_flag(tp, PCI_EXPRESS) &&
                  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
                /*
                 * Back to back register writes can cause problems on these
                 * chips; the workaround is to read back all register writes
                 * except those to mailbox registers.
                 *
                 * See tg3_write_indirect_reg32().
                 */
                tp->write32 = tg3_write_flush_reg32;
        }

        if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                if (tg3_flag(tp, MBOX_WRITE_REORDER))
                        tp->write32_rx_mbox = tg3_write_flush_reg32;
        }

        if (tg3_flag(tp, ICH_WORKAROUND)) {
                tp->read32 = tg3_read_indirect_reg32;
                tp->write32 = tg3_write_indirect_reg32;
                tp->read32_mbox = tg3_read_indirect_mbox;
                tp->write32_mbox = tg3_write_indirect_mbox;
                tp->write32_tx_mbox = tg3_write_indirect_mbox;
                tp->write32_rx_mbox = tg3_write_indirect_mbox;

                iounmap(tp->regs);
                tp->regs = NULL;

                pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
                pci_cmd &= ~PCI_COMMAND_MEMORY;
                pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
        }
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                tp->read32_mbox = tg3_read32_mbox_5906;
                tp->write32_mbox = tg3_write32_mbox_5906;
                tp->write32_tx_mbox = tg3_write32_mbox_5906;
                tp->write32_rx_mbox = tg3_write32_mbox_5906;
        }

        if (tp->write32 == tg3_write_indirect_reg32 ||
            (tg3_flag(tp, PCIX_MODE) &&
             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
                tg3_flag_set(tp, SRAM_USE_CONFIG);

        /* The memory arbiter has to be enabled in order for SRAM accesses
         * to succeed.  Normally on powerup the tg3 chip firmware will make
         * sure it is enabled, but other entities such as system netboot
         * code might disable it.
         */
        val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            tg3_flag(tp, 5780_CLASS)) {
                if (tg3_flag(tp, PCIX_MODE)) {
                        pci_read_config_dword(tp->pdev,
                                              tp->pcix_cap + PCI_X_STATUS,
                                              &val);
                        tp->pci_fn = val & 0x7;
                }
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
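                /* On 5717-class parts the function number is taken from
                 * the CPMU status word in SRAM; the signature check
                 * guards against trusting an uninitialized word.
                 */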
                tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
                if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
                    NIC_SRAM_CPMUSTAT_SIG) {
                        tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
                        tp->pci_fn = tp->pci_fn ? 1 : 0;
                }
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
                if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
                    NIC_SRAM_CPMUSTAT_SIG) {
                        tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
                                     TG3_CPMU_STATUS_FSHFT_5719;
                }
        }

        /* Get eeprom hw config before calling tg3_set_power_state().
         * In particular, the TG3_FLAG_IS_NIC flag must be
         * determined before calling tg3_set_power_state() so that
         * we know whether or not to switch out of Vaux power.
         * When the flag is set, it means that GPIO1 is used for eeprom
         * write protect and also implies that it is a LOM where GPIOs
         * are not used to switch power.
         */
        tg3_get_eeprom_hw_cfg(tp);

        if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
                tg3_flag_clear(tp, TSO_CAPABLE);
                tg3_flag_clear(tp, TSO_BUG);
                tp->fw_needed = NULL;
        }

        if (tg3_flag(tp, ENABLE_APE)) {
                /* Allow reads and writes to the
                 * APE register and memory space.
                 */
                pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                                 PCISTATE_ALLOW_APE_SHMEM_WR |
                                 PCISTATE_ALLOW_APE_PSPACE_WR;
                pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
                                       pci_state_reg);

                tg3_ape_lock_init(tp);
        }

        /* Set up tp->grc_local_ctrl before calling
         * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
         * will bring 5700's external PHY out of reset.
         * It is also used as eeprom write protect on LOMs.
         */
        tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            tg3_flag(tp, EEPROM_WRITE_PROT))
                tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
                                       GRC_LCLCTRL_GPIO_OUTPUT1);
        /* Unused GPIO3 must be driven as output on 5752 because there
         * are no pull-up resistors on unused GPIO pins.
         */
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
            tg3_flag(tp, 57765_CLASS))
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
                /* Turn off the debug UART. */
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
                if (tg3_flag(tp, IS_NIC))
                        /* Keep VMain power. */
                        tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                              GRC_LCLCTRL_GPIO_OUTPUT0;
        }

        /* Switch out of Vaux if it is a NIC */
        tg3_pwrsrc_switch_to_vmain(tp);

        /* Derive initial jumbo mode from MTU assigned in
         * ether_setup() via the alloc_etherdev() call
         */
        if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
                tg3_flag_set(tp, JUMBO_RING_ENABLE);

        /* Determine WakeOnLan speed to use. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
                tg3_flag_clear(tp, WOL_SPEED_100MB);
        } else {
                tg3_flag_set(tp, WOL_SPEED_100MB);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tp->phy_flags |= TG3_PHYFLG_IS_FET;

        /* A few boards don't want the Ethernet@WireSpeed PHY feature */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
             (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
             (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
            (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
            (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
                tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
                tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

        if (tg3_flag(tp, 5705_PLUS) &&
            !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
            !tg3_flag(tp, 57765_PLUS)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                        if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
                            tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
                                tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
                        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
                                tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
                } else
                        tp->phy_flags |= TG3_PHYFLG_BER_BUG;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
                tp->phy_otp = tg3_read_otp_phycfg(tp);
                if (tp->phy_otp == 0)
                        tp->phy_otp = TG3_OTP_DEFAULT;
        }

        if (tg3_flag(tp, CPMU_PRESENT))
                tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
        else
                tp->mi_mode = MAC_MI_MODE_BASE;

        tp->coalesce_mode = 0;
        if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
                tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

        /* Set these bits to enable statistics workaround. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
                tp->coalesce_mode |= HOSTCC_MODE_ATTN;
                tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
                tg3_flag_set(tp, USE_PHYLIB);

        err = tg3_mdio_init(tp);
        if (err)
                return err;

        /* Initialize data/descriptor byte/word swapping. */
        val = tr32(GRC_MODE);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
                        GRC_MODE_WORD_SWAP_B2HRX_DATA |
                        GRC_MODE_B2HRX_ENABLE |
                        GRC_MODE_HTX2B_ENABLE |
                        GRC_MODE_HOST_STACKUP);
        else
                val &= GRC_MODE_HOST_STACKUP;

        tw32(GRC_MODE, val | tp->grc_mode);

        tg3_switch_clocks(tp);

        /* Clear this out for sanity. */
        tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
                              &pci_state_reg);
        if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
            !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
                u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

                if (chiprevid == CHIPREV_ID_5701_A0 ||
                    chiprevid == CHIPREV_ID_5701_B0 ||
                    chiprevid == CHIPREV_ID_5701_B2 ||
                    chiprevid == CHIPREV_ID_5701_B5) {
                        void __iomem *sram_base;

                        /* Write some dummy words into the SRAM status block
                         * area and see if they read back correctly.  If the
                         * readback is bad, force-enable the PCI-X workaround.
                         */
                        sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

                        writel(0x00000000, sram_base);
                        writel(0x00000000, sram_base + 4);
                        writel(0xffffffff, sram_base + 4);
                        if (readl(sram_base) != 0x00000000)
                                tg3_flag_set(tp, PCIX_TARGET_HWBUG);
                }
        }

        udelay(50);
        tg3_nvram_init(tp);

        grc_misc_cfg = tr32(GRC_MISC_CFG);
        grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
            (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
                tg3_flag_set(tp, IS_5788);

        if (!tg3_flag(tp, IS_5788) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
                tg3_flag_set(tp, TAGGED_STATUS);
        if (tg3_flag(tp, TAGGED_STATUS)) {
                tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
                                      HOSTCC_MODE_CLRTICK_TXBD);

                tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                                       tp->misc_host_ctrl);
        }

        /* Preserve the APE MAC_MODE bits */
        if (tg3_flag(tp, ENABLE_APE))
                tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
        else
                tp->mac_mode = 0;

        /* these are limited to 10/100 only */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
             (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
             tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
             (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
              tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
              tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
            (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
             (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
              tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
              tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
            (tp->phy_flags & TG3_PHYFLG_IS_FET))
                tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

        err = tg3_phy_probe(tp);
        if (err) {
                dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
                /* ... but do not return immediately ... */
                tg3_mdio_fini(tp);
        }

        tg3_read_vpd(tp);
        tg3_read_fw_ver(tp);

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
                        tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
                else
                        tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
        }

        /* 5700 {AX,BX} chips have a broken status block link
         * change bit implementation, so we must use the
         * status register in those cases.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
                tg3_flag_set(tp, USE_LINKCHG_REG);
        else
                tg3_flag_clear(tp, USE_LINKCHG_REG);

15190         /* The led_ctrl is set during tg3_phy_probe; here we might
15191          * have to force the link status polling mechanism based
15192          * upon subsystem IDs.
15193          */
15194         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15195             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15196             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15197                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15198                 tg3_flag_set(tp, USE_LINKCHG_REG);
15199         }
15200
15201         /* For all SERDES we poll the MAC status register. */
15202         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15203                 tg3_flag_set(tp, POLL_SERDES);
15204         else
15205                 tg3_flag_clear(tp, POLL_SERDES);
15206
15207         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15208         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15209         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15210             tg3_flag(tp, PCIX_MODE)) {
15211                 tp->rx_offset = NET_SKB_PAD;
15212 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15213                 tp->rx_copy_thresh = ~(u16)0;
15214 #endif
15215         }
15216
15217         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15218         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15219         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15220
15221         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15222
15223         /* Increment the rx prod index on the rx std ring by at most
15224          * 8 for these chips to workaround hw errata.
15225          * 8 for these chips to work around hw errata.
15226         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15227             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15228             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15229                 tp->rx_std_max_post = 8;
15230
15231         if (tg3_flag(tp, ASPM_WORKAROUND))
15232                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15233                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15234
15235         return err;
15236 }
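
/* Editor's sketch, not part of the driver: the PCIX target hwbug probe
 * in tg3_get_invariants() above is a generic write/read-back test --
 * write known words into device SRAM through the mapped BAR, clobber a
 * neighboring word, and check that the first word survived.  A minimal
 * standalone version of that idea; the helper name and the iomem
 * window argument are hypothetical.
 */
#if 0
static bool sram_write_readback_ok(void __iomem *win)
{
	writel(0x00000000, win);	/* seed both words with zero */
	writel(0x00000000, win + 4);
	writel(0xffffffff, win + 4);	/* clobber only the second word */

	/* If the first word changed, back-to-back target writes were
	 * mishandled and the PCIX workaround must be forced on.
	 */
	return readl(win) == 0x00000000;
}
#endif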
15237
15238 #ifdef CONFIG_SPARC
15239 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15240 {
15241         struct net_device *dev = tp->dev;
15242         struct pci_dev *pdev = tp->pdev;
15243         struct device_node *dp = pci_device_to_OF_node(pdev);
15244         const unsigned char *addr;
15245         int len;
15246
15247         addr = of_get_property(dp, "local-mac-address", &len);
15248         if (addr && len == 6) {
15249                 memcpy(dev->dev_addr, addr, 6);
15250                 memcpy(dev->perm_addr, dev->dev_addr, 6);
15251                 return 0;
15252         }
15253         return -ENODEV;
15254 }
15255
15256 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15257 {
15258         struct net_device *dev = tp->dev;
15259
15260         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15261         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15262         return 0;
15263 }
15264 #endif
15265
15266 static int __devinit tg3_get_device_address(struct tg3 *tp)
15267 {
15268         struct net_device *dev = tp->dev;
15269         u32 hi, lo, mac_offset;
15270         int addr_ok = 0;
15271
15272 #ifdef CONFIG_SPARC
15273         if (!tg3_get_macaddr_sparc(tp))
15274                 return 0;
15275 #endif
15276
15277         mac_offset = 0x7c;
15278         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15279             tg3_flag(tp, 5780_CLASS)) {
15280                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15281                         mac_offset = 0xcc;
15282                 if (tg3_nvram_lock(tp))
15283                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15284                 else
15285                         tg3_nvram_unlock(tp);
15286         } else if (tg3_flag(tp, 5717_PLUS)) {
15287                 if (tp->pci_fn & 1)
15288                         mac_offset = 0xcc;
15289                 if (tp->pci_fn > 1)
15290                         mac_offset += 0x18c;
15291         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15292                 mac_offset = 0x10;
15293
15294         /* First try to get it from MAC address mailbox. */
15295         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15296         if ((hi >> 16) == 0x484b) {
15297                 dev->dev_addr[0] = (hi >>  8) & 0xff;
15298                 dev->dev_addr[1] = (hi >>  0) & 0xff;
15299
15300                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15301                 dev->dev_addr[2] = (lo >> 24) & 0xff;
15302                 dev->dev_addr[3] = (lo >> 16) & 0xff;
15303                 dev->dev_addr[4] = (lo >>  8) & 0xff;
15304                 dev->dev_addr[5] = (lo >>  0) & 0xff;
15305
15306                 /* Some old bootcode may report a 0 MAC address in SRAM */
15307                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15308         }
15309         if (!addr_ok) {
15310                 /* Next, try NVRAM. */
15311                 if (!tg3_flag(tp, NO_NVRAM) &&
15312                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15313                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15314                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15315                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15316                 }
15317                 /* Finally just fetch it out of the MAC control regs. */
15318                 else {
15319                         hi = tr32(MAC_ADDR_0_HIGH);
15320                         lo = tr32(MAC_ADDR_0_LOW);
15321
15322                         dev->dev_addr[5] = lo & 0xff;
15323                         dev->dev_addr[4] = (lo >> 8) & 0xff;
15324                         dev->dev_addr[3] = (lo >> 16) & 0xff;
15325                         dev->dev_addr[2] = (lo >> 24) & 0xff;
15326                         dev->dev_addr[1] = hi & 0xff;
15327                         dev->dev_addr[0] = (hi >> 8) & 0xff;
15328                 }
15329         }
15330
15331         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15332 #ifdef CONFIG_SPARC
15333                 if (!tg3_get_default_macaddr_sparc(tp))
15334                         return 0;
15335 #endif
15336                 return -EINVAL;
15337         }
15338         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15339         return 0;
15340 }
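
/* Editor's sketch, not part of the driver: the mailbox path in
 * tg3_get_device_address() unpacks the MAC address from two 32-bit
 * words -- the high word carries the 0x484b ("HK") signature plus
 * bytes 0-1, the low word carries bytes 2-5, most significant byte
 * first.  The unpacking in isolation; the helper name is hypothetical.
 */
#if 0
static void mac_from_hi_lo(u8 *addr, u32 hi, u32 lo)
{
	addr[0] = (hi >>  8) & 0xff;
	addr[1] = (hi >>  0) & 0xff;
	addr[2] = (lo >> 24) & 0xff;
	addr[3] = (lo >> 16) & 0xff;
	addr[4] = (lo >>  8) & 0xff;
	addr[5] = (lo >>  0) & 0xff;
}
#endif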
15341
15342 #define BOUNDARY_SINGLE_CACHELINE       1
15343 #define BOUNDARY_MULTI_CACHELINE        2
15344
15345 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15346 {
15347         int cacheline_size;
15348         u8 byte;
15349         int goal;
15350
15351         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15352         if (byte == 0)
15353                 cacheline_size = 1024;
15354         else
15355                 cacheline_size = (int) byte * 4;
15356
15357         /* On 5703 and later chips, the boundary bits have no
15358          * effect.
15359          */
15360         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15361             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15362             !tg3_flag(tp, PCI_EXPRESS))
15363                 goto out;
15364
15365 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15366         goal = BOUNDARY_MULTI_CACHELINE;
15367 #else
15368 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15369         goal = BOUNDARY_SINGLE_CACHELINE;
15370 #else
15371         goal = 0;
15372 #endif
15373 #endif
15374
15375         if (tg3_flag(tp, 57765_PLUS)) {
15376                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15377                 goto out;
15378         }
15379
15380         if (!goal)
15381                 goto out;
15382
15383         /* PCI controllers on most RISC systems tend to disconnect
15384          * when a device tries to burst across a cache-line boundary.
15385          * Therefore, letting tg3 do so just wastes PCI bandwidth.
15386          *
15387          * Unfortunately, for PCI-E there are only limited
15388          * write-side controls for this, and thus for reads
15389          * we will still get the disconnects.  We'll also waste
15390          * these PCI cycles for both read and write for chips
15391          * other than 5700 and 5701 which do not implement the
15392          * boundary bits.
15393          */
15394         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15395                 switch (cacheline_size) {
15396                 case 16:
15397                 case 32:
15398                 case 64:
15399                 case 128:
15400                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15401                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15402                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15403                         } else {
15404                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15405                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15406                         }
15407                         break;
15408
15409                 case 256:
15410                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15411                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15412                         break;
15413
15414                 default:
15415                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15416                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15417                         break;
15418                 }
15419         } else if (tg3_flag(tp, PCI_EXPRESS)) {
15420                 switch (cacheline_size) {
15421                 case 16:
15422                 case 32:
15423                 case 64:
15424                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15425                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15426                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15427                                 break;
15428                         }
15429                         /* fallthrough */
15430                 case 128:
15431                 default:
15432                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15433                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15434                         break;
15435                 }
15436         } else {
15437                 switch (cacheline_size) {
15438                 case 16:
15439                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15440                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15441                                         DMA_RWCTRL_WRITE_BNDRY_16);
15442                                 break;
15443                         }
15444                         /* fallthrough */
15445                 case 32:
15446                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15447                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15448                                         DMA_RWCTRL_WRITE_BNDRY_32);
15449                                 break;
15450                         }
15451                         /* fallthrough */
15452                 case 64:
15453                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15454                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15455                                         DMA_RWCTRL_WRITE_BNDRY_64);
15456                                 break;
15457                         }
15458                         /* fallthrough */
15459                 case 128:
15460                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15461                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15462                                         DMA_RWCTRL_WRITE_BNDRY_128);
15463                                 break;
15464                         }
15465                         /* fallthrough */
15466                 case 256:
15467                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15468                                 DMA_RWCTRL_WRITE_BNDRY_256);
15469                         break;
15470                 case 512:
15471                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15472                                 DMA_RWCTRL_WRITE_BNDRY_512);
15473                         break;
15474                 case 1024:
15475                 default:
15476                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15477                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15478                         break;
15479                 }
15480         }
15481
15482 out:
15483         return val;
15484 }
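
/* Editor's sketch, not part of the driver: PCI_CACHE_LINE_SIZE holds
 * the cache line size in 32-bit dwords, which is why
 * tg3_calc_dma_bndry() multiplies the raw byte by 4 and treats a raw
 * value of 0 as "unset", defaulting to 1024 bytes.  The conversion in
 * isolation; the helper name is hypothetical.
 */
#if 0
static int pci_cacheline_bytes(struct pci_dev *pdev)
{
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	return byte ? (int)byte * 4 : 1024;	/* dwords -> bytes */
}
#endif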
15485
15486 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15487 {
15488         struct tg3_internal_buffer_desc test_desc;
15489         u32 sram_dma_descs;
15490         int i, ret;
15491
15492         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15493
15494         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15495         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15496         tw32(RDMAC_STATUS, 0);
15497         tw32(WDMAC_STATUS, 0);
15498
15499         tw32(BUFMGR_MODE, 0);
15500         tw32(FTQ_RESET, 0);
15501
15502         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15503         test_desc.addr_lo = buf_dma & 0xffffffff;
15504         test_desc.nic_mbuf = 0x00002100;
15505         test_desc.len = size;
15506
15507         /*
15508          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15509          * the *second* time the tg3 driver was getting loaded after an
15510          * initial scan.
15511          *
15512          * Broadcom tells me:
15513          *   ...the DMA engine is connected to the GRC block and a DMA
15514          *   reset may affect the GRC block in some unpredictable way...
15515          *   The behavior of resets to individual blocks has not been tested.
15516          *
15517          * Broadcom noted the GRC reset will also reset all sub-components.
15518          */
15519         if (to_device) {
15520                 test_desc.cqid_sqid = (13 << 8) | 2;
15521
15522                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15523                 udelay(40);
15524         } else {
15525                 test_desc.cqid_sqid = (16 << 8) | 7;
15526
15527                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15528                 udelay(40);
15529         }
15530         test_desc.flags = 0x00000005;
15531
15532         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15533                 u32 val;
15534
15535                 val = *(((u32 *)&test_desc) + i);
15536                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15537                                        sram_dma_descs + (i * sizeof(u32)));
15538                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15539         }
15540         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15541
15542         if (to_device)
15543                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15544         else
15545                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15546
15547         ret = -ENODEV;
15548         for (i = 0; i < 40; i++) {
15549                 u32 val;
15550
15551                 if (to_device)
15552                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15553                 else
15554                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15555                 if ((val & 0xffff) == sram_dma_descs) {
15556                         ret = 0;
15557                         break;
15558                 }
15559
15560                 udelay(100);
15561         }
15562
15563         return ret;
15564 }
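
/* Editor's sketch, not part of the driver: tg3_do_test_dma() pushes the
 * test descriptor into NIC SRAM indirectly, one u32 at a time, via the
 * TG3PCI_MEM_WIN_BASE_ADDR / TG3PCI_MEM_WIN_DATA config-space pair:
 * point the window at an SRAM offset, then write the data word.  The
 * access pattern in isolation; the helper name is hypothetical.
 */
#if 0
static void mem_win_write(struct pci_dev *pdev, u32 off, u32 val)
{
	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_DATA, val);
}
#endif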
15565
15566 #define TEST_BUFFER_SIZE        0x2000
15567
15568 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15569         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15570         { },
15571 };
15572
15573 static int __devinit tg3_test_dma(struct tg3 *tp)
15574 {
15575         dma_addr_t buf_dma;
15576         u32 *buf, saved_dma_rwctrl;
15577         int ret = 0;
15578
15579         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15580                                  &buf_dma, GFP_KERNEL);
15581         if (!buf) {
15582                 ret = -ENOMEM;
15583                 goto out_nofree;
15584         }
15585
15586         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15587                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15588
15589         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15590
15591         if (tg3_flag(tp, 57765_PLUS))
15592                 goto out;
15593
15594         if (tg3_flag(tp, PCI_EXPRESS)) {
15595                 /* DMA read watermark not used on PCIE */
15596                 tp->dma_rwctrl |= 0x00180000;
15597         } else if (!tg3_flag(tp, PCIX_MODE)) {
15598                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15599                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15600                         tp->dma_rwctrl |= 0x003f0000;
15601                 else
15602                         tp->dma_rwctrl |= 0x003f000f;
15603         } else {
15604                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15605                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15606                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15607                         u32 read_water = 0x7;
15608
15609                         /* If the 5704 is behind the EPB bridge, we can
15610                          * do the less restrictive ONE_DMA workaround for
15611                          * better performance.
15612                          */
15613                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15614                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15615                                 tp->dma_rwctrl |= 0x8000;
15616                         else if (ccval == 0x6 || ccval == 0x7)
15617                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15618
15619                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15620                                 read_water = 4;
15621                         /* Set bit 23 to enable PCIX hw bug fix */
15622                         tp->dma_rwctrl |=
15623                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15624                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15625                                 (1 << 23);
15626                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15627                         /* 5780 always in PCIX mode */
15628                         tp->dma_rwctrl |= 0x00144000;
15629                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15630                         /* 5714 always in PCIX mode */
15631                         tp->dma_rwctrl |= 0x00148000;
15632                 } else {
15633                         tp->dma_rwctrl |= 0x001b000f;
15634                 }
15635         }
15636
15637         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15638             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15639                 tp->dma_rwctrl &= 0xfffffff0;
15640
15641         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15642             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15643                 /* Remove this if it causes problems for some boards. */
15644                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15645
15646                 /* On 5700/5701 chips, we need to set this bit.
15647                  * Otherwise the chip will issue cacheline transactions
15648                  * to streamable DMA memory with not all the byte
15649                  * enables turned on.  This is an error on several
15650                  * RISC PCI controllers, in particular sparc64.
15651                  *
15652                  * On 5703/5704 chips, this bit has been reassigned
15653                  * a different meaning.  In particular, it is used
15654                  * on those chips to enable a PCI-X workaround.
15655                  */
15656                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15657         }
15658
15659         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15660
15661 #if 0
15662         /* Unneeded, already done by tg3_get_invariants.  */
15663         tg3_switch_clocks(tp);
15664 #endif
15665
15666         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15667             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15668                 goto out;
15669
15670         /* It is best to perform the DMA test with the maximum write burst size
15671          * to expose the 5700/5701 write DMA bug.
15672          */
15673         saved_dma_rwctrl = tp->dma_rwctrl;
15674         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15675         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15676
15677         while (1) {
15678                 u32 *p = buf, i;
15679
15680                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15681                         p[i] = i;
15682
15683                 /* Send the buffer to the chip. */
15684                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15685                 if (ret) {
15686                         dev_err(&tp->pdev->dev,
15687                                 "%s: Buffer write failed. err = %d\n",
15688                                 __func__, ret);
15689                         break;
15690                 }
15691
15692 #if 0
15693                 /* Validate that the data reached card RAM correctly. */
15694                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15695                         u32 val;
15696                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15697                         if (le32_to_cpu(val) != p[i]) {
15698                                 dev_err(&tp->pdev->dev,
15699                                         "%s: Buffer corrupted on device! "
15700                                         "(%d != %d)\n", __func__, val, i);
15701                                 /* ret = -ENODEV here? */
15702                         }
15703                         p[i] = 0;
15704                 }
15705 #endif
15706                 /* Now read it back. */
15707                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15708                 if (ret) {
15709                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15710                                 "err = %d\n", __func__, ret);
15711                         break;
15712                 }
15713
15714                 /* Verify it. */
15715                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15716                         if (p[i] == i)
15717                                 continue;
15718
15719                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15720                             DMA_RWCTRL_WRITE_BNDRY_16) {
15721                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15722                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15723                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15724                                 break;
15725                         } else {
15726                                 dev_err(&tp->pdev->dev,
15727                                         "%s: Buffer corrupted on read back! "
15728                                         "(%d != %d)\n", __func__, p[i], i);
15729                                 ret = -ENODEV;
15730                                 goto out;
15731                         }
15732                 }
15733
15734                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15735                         /* Success. */
15736                         ret = 0;
15737                         break;
15738                 }
15739         }
15740         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15741             DMA_RWCTRL_WRITE_BNDRY_16) {
15742                 /* DMA test passed without adjusting the DMA boundary;
15743                  * now look for chipsets that are known to expose the
15744                  * DMA bug without failing the test.
15745                  */
15746                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15747                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15748                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15749                 } else {
15750                         /* Safe to use the calculated DMA boundary. */
15751                         tp->dma_rwctrl = saved_dma_rwctrl;
15752                 }
15753
15754                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15755         }
15756
15757 out:
15758         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15759 out_nofree:
15760         return ret;
15761 }
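
/* Editor's sketch, not part of the driver: tg3_test_dma() uses a
 * self-describing pattern -- word i holds the value i -- so a mismatch
 * on read-back identifies both the corrupted offset and the expected
 * value.  The fill/verify pair in isolation; the helper name is
 * hypothetical.
 */
#if 0
static int dma_pattern(u32 *p, size_t nwords, bool verify)
{
	size_t i;

	for (i = 0; i < nwords; i++) {
		if (!verify)
			p[i] = i;		/* fill with the index */
		else if (p[i] != i)
			return -EIO;		/* first corrupted word */
	}
	return 0;
}
#endif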
15762
15763 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15764 {
15765         if (tg3_flag(tp, 57765_PLUS)) {
15766                 tp->bufmgr_config.mbuf_read_dma_low_water =
15767                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15768                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15769                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15770                 tp->bufmgr_config.mbuf_high_water =
15771                         DEFAULT_MB_HIGH_WATER_57765;
15772
15773                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15774                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15775                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15776                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15777                 tp->bufmgr_config.mbuf_high_water_jumbo =
15778                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15779         } else if (tg3_flag(tp, 5705_PLUS)) {
15780                 tp->bufmgr_config.mbuf_read_dma_low_water =
15781                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15782                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15783                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15784                 tp->bufmgr_config.mbuf_high_water =
15785                         DEFAULT_MB_HIGH_WATER_5705;
15786                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15787                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15788                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15789                         tp->bufmgr_config.mbuf_high_water =
15790                                 DEFAULT_MB_HIGH_WATER_5906;
15791                 }
15792
15793                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15794                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15795                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15796                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15797                 tp->bufmgr_config.mbuf_high_water_jumbo =
15798                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15799         } else {
15800                 tp->bufmgr_config.mbuf_read_dma_low_water =
15801                         DEFAULT_MB_RDMA_LOW_WATER;
15802                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15803                         DEFAULT_MB_MACRX_LOW_WATER;
15804                 tp->bufmgr_config.mbuf_high_water =
15805                         DEFAULT_MB_HIGH_WATER;
15806
15807                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15808                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15809                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15810                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15811                 tp->bufmgr_config.mbuf_high_water_jumbo =
15812                         DEFAULT_MB_HIGH_WATER_JUMBO;
15813         }
15814
15815         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15816         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15817 }
15818
15819 static char * __devinit tg3_phy_string(struct tg3 *tp)
15820 {
15821         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15822         case TG3_PHY_ID_BCM5400:        return "5400";
15823         case TG3_PHY_ID_BCM5401:        return "5401";
15824         case TG3_PHY_ID_BCM5411:        return "5411";
15825         case TG3_PHY_ID_BCM5701:        return "5701";
15826         case TG3_PHY_ID_BCM5703:        return "5703";
15827         case TG3_PHY_ID_BCM5704:        return "5704";
15828         case TG3_PHY_ID_BCM5705:        return "5705";
15829         case TG3_PHY_ID_BCM5750:        return "5750";
15830         case TG3_PHY_ID_BCM5752:        return "5752";
15831         case TG3_PHY_ID_BCM5714:        return "5714";
15832         case TG3_PHY_ID_BCM5780:        return "5780";
15833         case TG3_PHY_ID_BCM5755:        return "5755";
15834         case TG3_PHY_ID_BCM5787:        return "5787";
15835         case TG3_PHY_ID_BCM5784:        return "5784";
15836         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15837         case TG3_PHY_ID_BCM5906:        return "5906";
15838         case TG3_PHY_ID_BCM5761:        return "5761";
15839         case TG3_PHY_ID_BCM5718C:       return "5718C";
15840         case TG3_PHY_ID_BCM5718S:       return "5718S";
15841         case TG3_PHY_ID_BCM57765:       return "57765";
15842         case TG3_PHY_ID_BCM5719C:       return "5719C";
15843         case TG3_PHY_ID_BCM5720C:       return "5720C";
15844         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15845         case 0:                 return "serdes";
15846         default:                return "unknown";
15847         }
15848 }
15849
15850 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15851 {
15852         if (tg3_flag(tp, PCI_EXPRESS)) {
15853                 strcpy(str, "PCI Express");
15854                 return str;
15855         } else if (tg3_flag(tp, PCIX_MODE)) {
15856                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15857
15858                 strcpy(str, "PCIX:");
15859
15860                 if ((clock_ctrl == 7) ||
15861                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15862                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15863                         strcat(str, "133MHz");
15864                 else if (clock_ctrl == 0)
15865                         strcat(str, "33MHz");
15866                 else if (clock_ctrl == 2)
15867                         strcat(str, "50MHz");
15868                 else if (clock_ctrl == 4)
15869                         strcat(str, "66MHz");
15870                 else if (clock_ctrl == 6)
15871                         strcat(str, "100MHz");
15872         } else {
15873                 strcpy(str, "PCI:");
15874                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15875                         strcat(str, "66MHz");
15876                 else
15877                         strcat(str, "33MHz");
15878         }
15879         if (tg3_flag(tp, PCI_32BIT))
15880                 strcat(str, ":32-bit");
15881         else
15882                 strcat(str, ":64-bit");
15883         return str;
15884 }
15885
15886 static void __devinit tg3_init_coal(struct tg3 *tp)
15887 {
15888         struct ethtool_coalesce *ec = &tp->coal;
15889
15890         memset(ec, 0, sizeof(*ec));
15891         ec->cmd = ETHTOOL_GCOALESCE;
15892         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15893         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15894         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15895         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15896         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15897         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15898         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15899         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15900         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15901
15902         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15903                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15904                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15905                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15906                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15907                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15908         }
15909
15910         if (tg3_flag(tp, 5705_PLUS)) {
15911                 ec->rx_coalesce_usecs_irq = 0;
15912                 ec->tx_coalesce_usecs_irq = 0;
15913                 ec->stats_block_coalesce_usecs = 0;
15914         }
15915 }
15916
15917 static int __devinit tg3_init_one(struct pci_dev *pdev,
15918                                   const struct pci_device_id *ent)
15919 {
15920         struct net_device *dev;
15921         struct tg3 *tp;
15922         int i, err, pm_cap;
15923         u32 sndmbx, rcvmbx, intmbx;
15924         char str[40];
15925         u64 dma_mask, persist_dma_mask;
15926         netdev_features_t features = 0;
15927
15928         printk_once(KERN_INFO "%s\n", version);
15929
15930         err = pci_enable_device(pdev);
15931         if (err) {
15932                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15933                 return err;
15934         }
15935
15936         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15937         if (err) {
15938                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15939                 goto err_out_disable_pdev;
15940         }
15941
15942         pci_set_master(pdev);
15943
15944         /* Find power-management capability. */
15945         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15946         if (pm_cap == 0) {
15947                 dev_err(&pdev->dev,
15948                         "Cannot find Power Management capability, aborting\n");
15949                 err = -EIO;
15950                 goto err_out_free_res;
15951         }
15952
15953         err = pci_set_power_state(pdev, PCI_D0);
15954         if (err) {
15955                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15956                 goto err_out_free_res;
15957         }
15958
15959         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15960         if (!dev) {
15961                 err = -ENOMEM;
15962                 goto err_out_power_down;
15963         }
15964
15965         SET_NETDEV_DEV(dev, &pdev->dev);
15966
15967         tp = netdev_priv(dev);
15968         tp->pdev = pdev;
15969         tp->dev = dev;
15970         tp->pm_cap = pm_cap;
15971         tp->rx_mode = TG3_DEF_RX_MODE;
15972         tp->tx_mode = TG3_DEF_TX_MODE;
15973
15974         if (tg3_debug > 0)
15975                 tp->msg_enable = tg3_debug;
15976         else
15977                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15978
15979         /* The word/byte swap controls here govern register access byte
15980          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15981          * setting below.
15982          */
15983         tp->misc_host_ctrl =
15984                 MISC_HOST_CTRL_MASK_PCI_INT |
15985                 MISC_HOST_CTRL_WORD_SWAP |
15986                 MISC_HOST_CTRL_INDIR_ACCESS |
15987                 MISC_HOST_CTRL_PCISTATE_RW;
15988
15989         /* The NONFRM (non-frame) byte/word swap controls take effect
15990          * on descriptor entries, i.e. anything which isn't packet data.
15991          *
15992          * The StrongARM chips on the board (one for tx, one for rx)
15993          * are running in big-endian mode.
15994          */
15995         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15996                         GRC_MODE_WSWAP_NONFRM_DATA);
15997 #ifdef __BIG_ENDIAN
15998         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15999 #endif
16000         spin_lock_init(&tp->lock);
16001         spin_lock_init(&tp->indirect_lock);
16002         INIT_WORK(&tp->reset_task, tg3_reset_task);
16003
16004         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16005         if (!tp->regs) {
16006                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16007                 err = -ENOMEM;
16008                 goto err_out_free_dev;
16009         }
16010
16011         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16012             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16013             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16014             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16015             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16016             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16017             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16018             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
16019                 tg3_flag_set(tp, ENABLE_APE);
16020                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16021                 if (!tp->aperegs) {
16022                         dev_err(&pdev->dev,
16023                                 "Cannot map APE registers, aborting\n");
16024                         err = -ENOMEM;
16025                         goto err_out_iounmap;
16026                 }
16027         }
16028
16029         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16030         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16031
16032         dev->ethtool_ops = &tg3_ethtool_ops;
16033         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16034         dev->netdev_ops = &tg3_netdev_ops;
16035         dev->irq = pdev->irq;
16036
16037         err = tg3_get_invariants(tp);
16038         if (err) {
16039                 dev_err(&pdev->dev,
16040                         "Problem fetching invariants of chip, aborting\n");
16041                 goto err_out_apeunmap;
16042         }
16043
16044         /* The EPB bridge inside 5714, 5715, and 5780 and any
16045          * device behind the EPB cannot support DMA addresses > 40-bit.
16046          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16047          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16048          * do DMA address check in tg3_start_xmit().
16049          */
16050         if (tg3_flag(tp, IS_5788))
16051                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16052         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16053                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16054 #ifdef CONFIG_HIGHMEM
16055                 dma_mask = DMA_BIT_MASK(64);
16056 #endif
16057         } else
16058                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16059
16060         /* Configure DMA attributes. */
16061         if (dma_mask > DMA_BIT_MASK(32)) {
16062                 err = pci_set_dma_mask(pdev, dma_mask);
16063                 if (!err) {
16064                         features |= NETIF_F_HIGHDMA;
16065                         err = pci_set_consistent_dma_mask(pdev,
16066                                                           persist_dma_mask);
16067                         if (err < 0) {
16068                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16069                                         "DMA for consistent allocations\n");
16070                                 goto err_out_apeunmap;
16071                         }
16072                 }
16073         }
16074         if (err || dma_mask == DMA_BIT_MASK(32)) {
16075                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16076                 if (err) {
16077                         dev_err(&pdev->dev,
16078                                 "No usable DMA configuration, aborting\n");
16079                         goto err_out_apeunmap;
16080                 }
16081         }
16082
16083         tg3_init_bufmgr_config(tp);
16084
16085         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16086
16087         /* 5700 B0 chips do not support checksumming correctly due
16088          * to hardware bugs.
16089          */
16090         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16091                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16092
16093                 if (tg3_flag(tp, 5755_PLUS))
16094                         features |= NETIF_F_IPV6_CSUM;
16095         }
16096
16097         /* TSO is on by default on chips that support hardware TSO.
16098          * Firmware TSO on older chips gives lower performance, so it
16099          * is off by default, but can be enabled using ethtool.
16100          */
16101         if ((tg3_flag(tp, HW_TSO_1) ||
16102              tg3_flag(tp, HW_TSO_2) ||
16103              tg3_flag(tp, HW_TSO_3)) &&
16104             (features & NETIF_F_IP_CSUM))
16105                 features |= NETIF_F_TSO;
16106         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16107                 if (features & NETIF_F_IPV6_CSUM)
16108                         features |= NETIF_F_TSO6;
16109                 if (tg3_flag(tp, HW_TSO_3) ||
16110                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16111                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16112                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16113                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16114                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16115                         features |= NETIF_F_TSO_ECN;
16116         }
16117
16118         dev->features |= features;
16119         dev->vlan_features |= features;
16120
16121         /*
16122          * Add loopback capability only for a subset of devices that support
16123          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16124          * loopback for the remaining devices.
16125          */
16126         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16127             !tg3_flag(tp, CPMU_PRESENT))
16128                 /* Add the loopback capability */
16129                 features |= NETIF_F_LOOPBACK;
16130
16131         dev->hw_features |= features;
16132
16133         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16134             !tg3_flag(tp, TSO_CAPABLE) &&
16135             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16136                 tg3_flag_set(tp, MAX_RXPEND_64);
16137                 tp->rx_pending = 63;
16138         }
16139
16140         err = tg3_get_device_address(tp);
16141         if (err) {
16142                 dev_err(&pdev->dev,
16143                         "Could not obtain valid ethernet address, aborting\n");
16144                 goto err_out_apeunmap;
16145         }
16146
16147         /*
16148          * Reset chip in case UNDI or EFI driver did not shut down
16149          * DMA.  The self test will enable WDMAC and we'll see (spurious)
16150          * pending DMA on the PCI bus at that point.
16151          */
16152         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16153             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16154                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16155                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16156         }
16157
16158         err = tg3_test_dma(tp);
16159         if (err) {
16160                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16161                 goto err_out_apeunmap;
16162         }
16163
16164         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16165         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16166         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16167         for (i = 0; i < tp->irq_max; i++) {
16168                 struct tg3_napi *tnapi = &tp->napi[i];
16169
16170                 tnapi->tp = tp;
16171                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16172
16173                 tnapi->int_mbox = intmbx;
16174                 if (i <= 4)
16175                         intmbx += 0x8;
16176                 else
16177                         intmbx += 0x4;
16178
16179                 tnapi->consmbox = rcvmbx;
16180                 tnapi->prodmbox = sndmbx;
16181
16182                 if (i)
16183                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16184                 else
16185                         tnapi->coal_now = HOSTCC_MODE_NOW;
16186
16187                 if (!tg3_flag(tp, SUPPORT_MSIX))
16188                         break;
16189
16190                 /*
16191                  * If we support MSIX, we'll be using RSS.  If we're using
16192                  * RSS, the first vector only handles link interrupts and the
16193                  * remaining vectors handle rx and tx interrupts.  Reuse the
16194                  * mailbox values for the next iteration.  The values we set up
16195                  * above are still useful for the single-vectored mode.
16196                  */
16197                 if (!i)
16198                         continue;
16199
16200                 rcvmbx += 0x8;
16201
16202                 if (sndmbx & 0x4)
16203                         sndmbx -= 0x4;
16204                 else
16205                         sndmbx += 0xc;
16206         }
16207
16208         tg3_init_coal(tp);
16209
16210         pci_set_drvdata(pdev, dev);
16211
16212         if (tg3_flag(tp, 5717_PLUS)) {
16213                 /* Resume a low-power mode */
16214                 tg3_frob_aux_power(tp, false);
16215         }
16216
16217         tg3_timer_init(tp);
16218
16219         err = register_netdev(dev);
16220         if (err) {
16221                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16222                 goto err_out_apeunmap;
16223         }
16224
16225         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16226                     tp->board_part_number,
16227                     tp->pci_chip_rev_id,
16228                     tg3_bus_string(tp, str),
16229                     dev->dev_addr);
16230
16231         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16232                 struct phy_device *phydev;
16233                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16234                 netdev_info(dev,
16235                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16236                             phydev->drv->name, dev_name(&phydev->dev));
16237         } else {
16238                 char *ethtype;
16239
16240                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16241                         ethtype = "10/100Base-TX";
16242                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16243                         ethtype = "1000Base-SX";
16244                 else
16245                         ethtype = "10/100/1000Base-T";
16246
16247                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16248                             "(WireSpeed[%d], EEE[%d])\n",
16249                             tg3_phy_string(tp), ethtype,
16250                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16251                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16252         }
16253
16254         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16255                     (dev->features & NETIF_F_RXCSUM) != 0,
16256                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
16257                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16258                     tg3_flag(tp, ENABLE_ASF) != 0,
16259                     tg3_flag(tp, TSO_CAPABLE) != 0);
16260         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16261                     tp->dma_rwctrl,
16262                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16263                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16264
16265         pci_save_state(pdev);
16266
16267         return 0;
16268
16269 err_out_apeunmap:
16270         if (tp->aperegs) {
16271                 iounmap(tp->aperegs);
16272                 tp->aperegs = NULL;
16273         }
16274
16275 err_out_iounmap:
16276         if (tp->regs) {
16277                 iounmap(tp->regs);
16278                 tp->regs = NULL;
16279         }
16280
16281 err_out_free_dev:
16282         free_netdev(dev);
16283
16284 err_out_power_down:
16285         pci_set_power_state(pdev, PCI_D3hot);
16286
16287 err_out_free_res:
16288         pci_release_regions(pdev);
16289
16290 err_out_disable_pdev:
16291         pci_disable_device(pdev);
16292         pci_set_drvdata(pdev, NULL);
16293         return err;
16294 }
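
/* Editor's sketch, not part of the driver: the per-vector loop in
 * tg3_init_one() advances the interrupt mailbox by 0x8 (0x4 for late
 * vectors), bumps the rx consumer mailbox by 0x8 per vector, and
 * alternates the tx producer mailbox by -0x4/+0xc so the tx mailboxes
 * interleave; vector 0 is skipped because under RSS it handles link
 * interrupts only and vector 1 reuses its rx/tx mailboxes.  Dumping
 * the progression makes the strides visible; the helper name is
 * hypothetical.
 */
#if 0
static void dump_mbox_layout(u32 intmbx, u32 rcvmbx, u32 sndmbx, int nvecs)
{
	int i;

	for (i = 0; i < nvecs; i++) {
		pr_info("vec%d: int=%#x rx=%#x tx=%#x\n",
			i, intmbx, rcvmbx, sndmbx);

		intmbx += (i <= 4) ? 0x8 : 0x4;
		if (!i)
			continue;	/* vec1 reuses vec0's rx/tx mailboxes */
		rcvmbx += 0x8;
		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
}
#endif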
16295
16296 static void __devexit tg3_remove_one(struct pci_dev *pdev)
16297 {
16298         struct net_device *dev = pci_get_drvdata(pdev);
16299
16300         if (dev) {
16301                 struct tg3 *tp = netdev_priv(dev);
16302
16303                 release_firmware(tp->fw);
16304
16305                 tg3_reset_task_cancel(tp);
16306
16307                 if (tg3_flag(tp, USE_PHYLIB)) {
16308                         tg3_phy_fini(tp);
16309                         tg3_mdio_fini(tp);
16310                 }
16311
16312                 unregister_netdev(dev);
16313                 if (tp->aperegs) {
16314                         iounmap(tp->aperegs);
16315                         tp->aperegs = NULL;
16316                 }
16317                 if (tp->regs) {
16318                         iounmap(tp->regs);
16319                         tp->regs = NULL;
16320                 }
16321                 free_netdev(dev);
16322                 pci_release_regions(pdev);
16323                 pci_disable_device(pdev);
16324                 pci_set_drvdata(pdev, NULL);
16325         }
16326 }
16327
16328 #ifdef CONFIG_PM_SLEEP
16329 static int tg3_suspend(struct device *device)
16330 {
16331         struct pci_dev *pdev = to_pci_dev(device);
16332         struct net_device *dev = pci_get_drvdata(pdev);
16333         struct tg3 *tp = netdev_priv(dev);
16334         int err;
16335
16336         if (!netif_running(dev))
16337                 return 0;
16338
16339         tg3_reset_task_cancel(tp);
16340         tg3_phy_stop(tp);
16341         tg3_netif_stop(tp);
16342
16343         tg3_timer_stop(tp);
16344
16345         tg3_full_lock(tp, 1);
16346         tg3_disable_ints(tp);
16347         tg3_full_unlock(tp);
16348
16349         netif_device_detach(dev);
16350
16351         tg3_full_lock(tp, 0);
16352         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16353         tg3_flag_clear(tp, INIT_COMPLETE);
16354         tg3_full_unlock(tp);
16355
16356         err = tg3_power_down_prepare(tp);
16357         if (err) {
16358                 int err2;
16359
16360                 tg3_full_lock(tp, 0);
16361
16362                 tg3_flag_set(tp, INIT_COMPLETE);
16363                 err2 = tg3_restart_hw(tp, 1);
16364                 if (err2)
16365                         goto out;
16366
16367                 tg3_timer_start(tp);
16368
16369                 netif_device_attach(dev);
16370                 tg3_netif_start(tp);
16371
16372 out:
16373                 tg3_full_unlock(tp);
16374
16375                 if (!err2)
16376                         tg3_phy_start(tp);
16377         }
16378
16379         return err;
16380 }
16381
16382 static int tg3_resume(struct device *device)
16383 {
16384         struct pci_dev *pdev = to_pci_dev(device);
16385         struct net_device *dev = pci_get_drvdata(pdev);
16386         struct tg3 *tp = netdev_priv(dev);
16387         int err;
16388
16389         if (!netif_running(dev))
16390                 return 0;
16391
16392         netif_device_attach(dev);
16393
16394         tg3_full_lock(tp, 0);
16395
16396         tg3_flag_set(tp, INIT_COMPLETE);
16397         err = tg3_restart_hw(tp, 1);
16398         if (err)
16399                 goto out;
16400
16401         tg3_timer_start(tp);
16402
16403         tg3_netif_start(tp);
16404
16405 out:
16406         tg3_full_unlock(tp);
16407
16408         if (!err)
16409                 tg3_phy_start(tp);
16410
16411         return err;
16412 }
16413
16414 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16415 #define TG3_PM_OPS (&tg3_pm_ops)
16416
16417 #else
16418
16419 #define TG3_PM_OPS NULL
16420
16421 #endif /* CONFIG_PM_SLEEP */
16422
16423 /**
16424  * tg3_io_error_detected - called when PCI error is detected
16425  * @pdev: Pointer to PCI device
16426  * @state: The current pci connection state
16427  *
16428  * This function is called after a PCI bus error affecting
16429  * this device has been detected.
16430  */
16431 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16432                                               pci_channel_state_t state)
16433 {
16434         struct net_device *netdev = pci_get_drvdata(pdev);
16435         struct tg3 *tp = netdev_priv(netdev);
16436         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16437
16438         netdev_info(netdev, "PCI I/O error detected\n");
16439
16440         rtnl_lock();
16441
16442         if (!netif_running(netdev))
16443                 goto done;
16444
16445         tg3_phy_stop(tp);
16446
16447         tg3_netif_stop(tp);
16448
16449         tg3_timer_stop(tp);
16450
16451         /* Want to make sure that the reset task doesn't run */
16452         tg3_reset_task_cancel(tp);
16453
16454         netif_device_detach(netdev);
16455
16456         /* Clean up software state, even if MMIO is blocked */
16457         tg3_full_lock(tp, 0);
16458         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16459         tg3_full_unlock(tp);
16460
16461 done:
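        /*
         * err defaults to PCI_ERS_RESULT_NEED_RESET, asking the PCI core
         * for a slot reset.  On a permanent channel failure recovery is
         * impossible, so report DISCONNECT instead; otherwise disable the
         * device until tg3_io_slot_reset() re-enables it.
         */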
16462         if (state == pci_channel_io_perm_failure)
16463                 err = PCI_ERS_RESULT_DISCONNECT;
16464         else
16465                 pci_disable_device(pdev);
16466
16467         rtnl_unlock();
16468
16469         return err;
16470 }
16471
16472 /**
16473  * tg3_io_slot_reset - called after the PCI bus has been reset.
16474  * @pdev: Pointer to PCI device
16475  *
16476  * Restart the card from scratch, as if from a cold-boot.
16477  * At this point, the card has experienced a hard reset,
16478  * followed by fixups by BIOS, and has its config space
16479  * set up identically to what it was at cold boot.
16480  */
16481 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16482 {
16483         struct net_device *netdev = pci_get_drvdata(pdev);
16484         struct tg3 *tp = netdev_priv(netdev);
16485         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16486         int err;
16487
16488         rtnl_lock();
16489
16490         if (pci_enable_device(pdev)) {
16491                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16492                 goto done;
16493         }
16494
16495         pci_set_master(pdev);
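        /*
         * Restore the config space the driver saved earlier, then save it
         * again so any later restore starts from this post-reset state.
         */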
16496         pci_restore_state(pdev);
16497         pci_save_state(pdev);
16498
16499         if (!netif_running(netdev)) {
16500                 rc = PCI_ERS_RESULT_RECOVERED;
16501                 goto done;
16502         }
16503
16504         err = tg3_power_up(tp);
16505         if (err)
16506                 goto done;
16507
16508         rc = PCI_ERS_RESULT_RECOVERED;
16509
16510 done:
16511         rtnl_unlock();
16512
16513         return rc;
16514 }
16515
16516 /**
16517  * tg3_io_resume - called when traffic can start flowing again.
16518  * @pdev: Pointer to PCI device
16519  *
16520  * This callback is called when the error recovery driver tells
16521  * us that it's OK to resume normal operation.
16522  */
16523 static void tg3_io_resume(struct pci_dev *pdev)
16524 {
16525         struct net_device *netdev = pci_get_drvdata(pdev);
16526         struct tg3 *tp = netdev_priv(netdev);
16527         int err;
16528
16529         rtnl_lock();
16530
16531         if (!netif_running(netdev))
16532                 goto done;
16533
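        /* Fully reprogram the hardware now that the slot has been reset. */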
16534         tg3_full_lock(tp, 0);
16535         tg3_flag_set(tp, INIT_COMPLETE);
16536         err = tg3_restart_hw(tp, 1);
16537         tg3_full_unlock(tp);
16538         if (err) {
16539                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16540                 goto done;
16541         }
16542
16543         netif_device_attach(netdev);
16544
16545         tg3_timer_start(tp);
16546
16547         tg3_netif_start(tp);
16548
16549         tg3_phy_start(tp);
16550
16551 done:
16552         rtnl_unlock();
16553 }
16554
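/*
 * PCI error recovery happens in up to three stages: the core calls
 * .error_detected to quiesce the driver, .slot_reset after the slot has
 * been reset, and .resume once normal traffic may flow again.
 */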
16555 static const struct pci_error_handlers tg3_err_handler = {
16556         .error_detected = tg3_io_error_detected,
16557         .slot_reset     = tg3_io_slot_reset,
16558         .resume         = tg3_io_resume
16559 };
16560
16561 static struct pci_driver tg3_driver = {
16562         .name           = DRV_MODULE_NAME,
16563         .id_table       = tg3_pci_tbl,
16564         .probe          = tg3_init_one,
16565         .remove         = __devexit_p(tg3_remove_one),
16566         .err_handler    = &tg3_err_handler,
16567         .driver.pm      = TG3_PM_OPS,
16568 };
16569
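/*
 * Module entry points simply (de)register the PCI driver; all
 * per-device setup and teardown happens in tg3_init_one() and
 * tg3_remove_one().
 */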
16570 static int __init tg3_init(void)
16571 {
16572         return pci_register_driver(&tg3_driver);
16573 }
16574
16575 static void __exit tg3_cleanup(void)
16576 {
16577         pci_unregister_driver(&tg3_driver);
16578 }
16579
16580 module_init(tg3_init);
16581 module_exit(tg3_cleanup);