tg3: Call tg3_netif_stop() from tg3_stop()
drivers/net/ethernet/broadcom/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

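/* Example (illustrative only): the ##-pasting above lets callers name
 * feature bits by their short form, e.g.
 *
 *      if (tg3_flag(tp, ENABLE_APE))
 *              tg3_flag_set(tp, APE_HAS_NCSI);
 *
 * which expands to tests on TG3_FLAG_ENABLE_APE / TG3_FLAG_APE_HAS_NCSI.
 */
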
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     126
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "November 05, 2012"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

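/* Example (illustrative only): TG3_TX_RING_SIZE is a power of two, so
 * NEXT_TX() wraps the ring index with a mask rather than a modulo,
 * e.g. NEXT_TX(511) == ((511 + 1) & 511) == 0.
 */
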
#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
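
/* On platforms where NET_IP_ALIGN is 0 (e.g. x86) or unaligned access is
 * cheap, TG3_RX_COPY_THRESH() therefore collapses to a compile-time
 * constant and the per-device rx_copy_thresh field is not consulted.
 */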

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

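/* Example (illustrative only): tw32() posts a write, tw32_f() flushes it
 * by reading the register back, and tw32_wait_f() additionally delays for
 * registers that must not be read back immediately, e.g.
 *
 *      tw32_f(MAC_MI_COM, frame_val);
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 */
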
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
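
/* Example (illustrative only): the two helpers above tunnel NIC SRAM
 * accesses through the PCI memory window, e.g. posting a firmware
 * mailbox word (constants from tg3.h):
 *
 *      tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE3);
 */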

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't have any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
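
/* Example (illustrative only): APE locks are held only around the
 * shared-resource access and must always be released:
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *              return -EBUSY;
 *      ... access APE shared memory ...
 *      tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */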

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}
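
/* Example (illustrative only): tg3_readphy()/tg3_writephy() issue MII
 * management frames through MAC_MI_COM, e.g. a BMCR read-modify-write:
 *
 *      u32 val;
 *
 *      if (!tg3_readphy(tp, MII_BMCR, &val))
 *              tg3_writephy(tp, MII_BMCR, val | BMCR_RESET);
 */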

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

/* Note: no trailing semicolon, so the macro is usable as an expression
 * just like its _ENABLE counterpart.
 */
#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)
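
/* Example (illustrative only): DSP register writes are typically
 * bracketed by the SMDSP helpers above; MII_TG3_DSP_EXP8 here stands in
 * for any DSP register from tg3.h:
 *
 *      if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 *              tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
 *              TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *      }
 */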

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

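/* Invoked by the MDIO core when the bus is registered; nothing to do
 * here since tg3 resets the PHY explicitly via tg3_bmcr_reset() where
 * needed.
 */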
1320 static int tg3_mdio_reset(struct mii_bus *bp)
1321 {
1322         return 0;
1323 }
1324
1325 static void tg3_mdio_config_5785(struct tg3 *tp)
1326 {
1327         u32 val;
1328         struct phy_device *phydev;
1329
1330         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1331         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1332         case PHY_ID_BCM50610:
1333         case PHY_ID_BCM50610M:
1334                 val = MAC_PHYCFG2_50610_LED_MODES;
1335                 break;
1336         case PHY_ID_BCMAC131:
1337                 val = MAC_PHYCFG2_AC131_LED_MODES;
1338                 break;
1339         case PHY_ID_RTL8211C:
1340                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1341                 break;
1342         case PHY_ID_RTL8201E:
1343                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1344                 break;
1345         default:
1346                 return;
1347         }
1348
1349         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1350                 tw32(MAC_PHYCFG2, val);
1351
1352                 val = tr32(MAC_PHYCFG1);
1353                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1354                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1355                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1356                 tw32(MAC_PHYCFG1, val);
1357
1358                 return;
1359         }
1360
1361         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1362                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1363                        MAC_PHYCFG2_FMODE_MASK_MASK |
1364                        MAC_PHYCFG2_GMODE_MASK_MASK |
1365                        MAC_PHYCFG2_ACT_MASK_MASK   |
1366                        MAC_PHYCFG2_QUAL_MASK_MASK |
1367                        MAC_PHYCFG2_INBAND_ENABLE;
1368
1369         tw32(MAC_PHYCFG2, val);
1370
1371         val = tr32(MAC_PHYCFG1);
1372         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1373                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1374         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1375                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1376                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1377                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1378                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1379         }
1380         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1381                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1382         tw32(MAC_PHYCFG1, val);
1383
1384         val = tr32(MAC_EXT_RGMII_MODE);
1385         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1386                  MAC_RGMII_MODE_RX_QUALITY |
1387                  MAC_RGMII_MODE_RX_ACTIVITY |
1388                  MAC_RGMII_MODE_RX_ENG_DET |
1389                  MAC_RGMII_MODE_TX_ENABLE |
1390                  MAC_RGMII_MODE_TX_LOWPWR |
1391                  MAC_RGMII_MODE_TX_RESET);
1392         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1393                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1394                         val |= MAC_RGMII_MODE_RX_INT_B |
1395                                MAC_RGMII_MODE_RX_QUALITY |
1396                                MAC_RGMII_MODE_RX_ACTIVITY |
1397                                MAC_RGMII_MODE_RX_ENG_DET;
1398                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1399                         val |= MAC_RGMII_MODE_TX_ENABLE |
1400                                MAC_RGMII_MODE_TX_LOWPWR |
1401                                MAC_RGMII_MODE_TX_RESET;
1402         }
1403         tw32(MAC_EXT_RGMII_MODE, val);
1404 }
1405
1406 static void tg3_mdio_start(struct tg3 *tp)
1407 {
1408         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1409         tw32_f(MAC_MI_MODE, tp->mi_mode);
1410         udelay(80);
1411
1412         if (tg3_flag(tp, MDIOBUS_INITED) &&
1413             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1414                 tg3_mdio_config_5785(tp);
1415 }
1416
1417 static int tg3_mdio_init(struct tg3 *tp)
1418 {
1419         int i;
1420         u32 reg;
1421         struct phy_device *phydev;
1422
1423         if (tg3_flag(tp, 5717_PLUS)) {
1424                 u32 is_serdes;
1425
1426                 tp->phy_addr = tp->pci_fn + 1;
1427
1428                 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1429                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1430                 else
1431                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1432                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1433                 if (is_serdes)
1434                         tp->phy_addr += 7;
1435         } else
1436                 tp->phy_addr = TG3_PHY_MII_ADDR;
1437
1438         tg3_mdio_start(tp);
1439
1440         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1441                 return 0;
1442
1443         tp->mdio_bus = mdiobus_alloc();
1444         if (tp->mdio_bus == NULL)
1445                 return -ENOMEM;
1446
1447         tp->mdio_bus->name     = "tg3 mdio bus";
1448         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1449                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1450         tp->mdio_bus->priv     = tp;
1451         tp->mdio_bus->parent   = &tp->pdev->dev;
1452         tp->mdio_bus->read     = &tg3_mdio_read;
1453         tp->mdio_bus->write    = &tg3_mdio_write;
1454         tp->mdio_bus->reset    = &tg3_mdio_reset;
1455         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1456         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1457
1458         for (i = 0; i < PHY_MAX_ADDR; i++)
1459                 tp->mdio_bus->irq[i] = PHY_POLL;
1460
1461         /* The bus registration will look for all the PHYs on the mdio bus.
1462          * Unfortunately, it does not ensure the PHY is powered up before
1463          * accessing the PHY ID registers.  A chip reset is the
1464          * quickest way to bring the device back to an operational state..
1465          */
1466         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1467                 tg3_bmcr_reset(tp);
1468
1469         i = mdiobus_register(tp->mdio_bus);
1470         if (i) {
1471                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1472                 mdiobus_free(tp->mdio_bus);
1473                 return i;
1474         }
1475
1476         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1477
1478         if (!phydev || !phydev->drv) {
1479                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1480                 mdiobus_unregister(tp->mdio_bus);
1481                 mdiobus_free(tp->mdio_bus);
1482                 return -ENODEV;
1483         }
1484
1485         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1486         case PHY_ID_BCM57780:
1487                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1488                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1489                 break;
1490         case PHY_ID_BCM50610:
1491         case PHY_ID_BCM50610M:
1492                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1493                                      PHY_BRCM_RX_REFCLK_UNUSED |
1494                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1495                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1496                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1497                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1498                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1499                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1500                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1501                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1502                 /* fallthru */
1503         case PHY_ID_RTL8211C:
1504                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1505                 break;
1506         case PHY_ID_RTL8201E:
1507         case PHY_ID_BCMAC131:
1508                 phydev->interface = PHY_INTERFACE_MODE_MII;
1509                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1510                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1511                 break;
1512         }
1513
1514         tg3_flag_set(tp, MDIOBUS_INITED);
1515
1516         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1517                 tg3_mdio_config_5785(tp);
1518
1519         return 0;
1520 }
1521
1522 static void tg3_mdio_fini(struct tg3 *tp)
1523 {
1524         if (tg3_flag(tp, MDIOBUS_INITED)) {
1525                 tg3_flag_clear(tp, MDIOBUS_INITED);
1526                 mdiobus_unregister(tp->mdio_bus);
1527                 mdiobus_free(tp->mdio_bus);
1528         }
1529 }
1530
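/* Driver-to-firmware event signalling: the driver sets
 * GRC_RX_CPU_DRIVER_EVENT in the RX CPU event register and the
 * firmware clears it to acknowledge.  last_event_jiffies records
 * when the event was raised so tg3_wait_for_event_ack() below can
 * bound how long it polls for that acknowledgement.
 */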
1531 /* tp->lock is held. */
1532 static inline void tg3_generate_fw_event(struct tg3 *tp)
1533 {
1534         u32 val;
1535
1536         val = tr32(GRC_RX_CPU_EVENT);
1537         val |= GRC_RX_CPU_DRIVER_EVENT;
1538         tw32_f(GRC_RX_CPU_EVENT, val);
1539
1540         tp->last_event_jiffies = jiffies;
1541 }
1542
1543 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1544
1545 /* tp->lock is held. */
1546 static void tg3_wait_for_event_ack(struct tg3 *tp)
1547 {
1548         int i;
1549         unsigned int delay_cnt;
1550         long time_remain;
1551
1552         /* If enough time has passed, no wait is necessary. */
1553         time_remain = (long)(tp->last_event_jiffies + 1 +
1554                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1555                       (long)jiffies;
1556         if (time_remain < 0)
1557                 return;
1558
1559         /* Check if we can shorten the wait time. */
1560         delay_cnt = jiffies_to_usecs(time_remain);
1561         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1562                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
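        /* The polling loop below waits 8 usec per iteration, so
         * dividing the remaining time by 8 (>> 3) and adding one
         * converts it into an iteration count, rounded up.
         */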
1563         delay_cnt = (delay_cnt >> 3) + 1;
1564
1565         for (i = 0; i < delay_cnt; i++) {
1566                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1567                         break;
1568                 udelay(8);
1569         }
1570 }
1571
1572 /* tp->lock is held. */
1573 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1574 {
1575         u32 reg, val;
1576
1577         val = 0;
1578         if (!tg3_readphy(tp, MII_BMCR, &reg))
1579                 val = reg << 16;
1580         if (!tg3_readphy(tp, MII_BMSR, &reg))
1581                 val |= (reg & 0xffff);
1582         *data++ = val;
1583
1584         val = 0;
1585         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1586                 val = reg << 16;
1587         if (!tg3_readphy(tp, MII_LPA, &reg))
1588                 val |= (reg & 0xffff);
1589         *data++ = val;
1590
1591         val = 0;
1592         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1593                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1594                         val = reg << 16;
1595                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1596                         val |= (reg & 0xffff);
1597         }
1598         *data++ = val;
1599
1600         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1601                 val = reg << 16;
1602         else
1603                 val = 0;
1604         *data++ = val;
1605 }
1606
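/* Copy the current PHY state (BMCR/BMSR, advertisement, link partner
 * ability, 1000BASE-T control/status) into the firmware command
 * mailbox so the management firmware on ASF-enabled 5780-class parts
 * can track link changes alongside the host driver.
 */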
1607 /* tp->lock is held. */
1608 static void tg3_ump_link_report(struct tg3 *tp)
1609 {
1610         u32 data[4];
1611
1612         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1613                 return;
1614
1615         tg3_phy_gather_ump_data(tp, data);
1616
1617         tg3_wait_for_event_ack(tp);
1618
1619         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1620         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1621         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1622         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1623         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1624         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1625
1626         tg3_generate_fw_event(tp);
1627 }
1628
1629 /* tp->lock is held. */
1630 static void tg3_stop_fw(struct tg3 *tp)
1631 {
1632         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1633                 /* Wait for RX cpu to ACK the previous event. */
1634                 tg3_wait_for_event_ack(tp);
1635
1636                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1637
1638                 tg3_generate_fw_event(tp);
1639
1640                 /* Wait for RX cpu to ACK this event. */
1641                 tg3_wait_for_event_ack(tp);
1642         }
1643 }
1644
1645 /* tp->lock is held. */
1646 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1647 {
1648         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1649                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1650
1651         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1652                 switch (kind) {
1653                 case RESET_KIND_INIT:
1654                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1655                                       DRV_STATE_START);
1656                         break;
1657
1658                 case RESET_KIND_SHUTDOWN:
1659                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1660                                       DRV_STATE_UNLOAD);
1661                         break;
1662
1663                 case RESET_KIND_SUSPEND:
1664                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1665                                       DRV_STATE_SUSPEND);
1666                         break;
1667
1668                 default:
1669                         break;
1670                 }
1671         }
1672
1673         if (kind == RESET_KIND_INIT ||
1674             kind == RESET_KIND_SUSPEND)
1675                 tg3_ape_driver_state_change(tp, kind);
1676 }
1677
1678 /* tp->lock is held. */
1679 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1680 {
1681         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1682                 switch (kind) {
1683                 case RESET_KIND_INIT:
1684                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1685                                       DRV_STATE_START_DONE);
1686                         break;
1687
1688                 case RESET_KIND_SHUTDOWN:
1689                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1690                                       DRV_STATE_UNLOAD_DONE);
1691                         break;
1692
1693                 default:
1694                         break;
1695                 }
1696         }
1697
1698         if (kind == RESET_KIND_SHUTDOWN)
1699                 tg3_ape_driver_state_change(tp, kind);
1700 }
1701
1702 /* tp->lock is held. */
1703 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1704 {
1705         if (tg3_flag(tp, ENABLE_ASF)) {
1706                 switch (kind) {
1707                 case RESET_KIND_INIT:
1708                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1709                                       DRV_STATE_START);
1710                         break;
1711
1712                 case RESET_KIND_SHUTDOWN:
1713                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1714                                       DRV_STATE_UNLOAD);
1715                         break;
1716
1717                 case RESET_KIND_SUSPEND:
1718                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1719                                       DRV_STATE_SUSPEND);
1720                         break;
1721
1722                 default:
1723                         break;
1724                 }
1725         }
1726 }
1727
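/* Wait for bootcode to complete after a reset.  The driver writes
 * NIC_SRAM_FIRMWARE_MBOX_MAGIC1 before the reset (see
 * tg3_write_sig_pre_reset()), and the firmware posts the one's
 * complement of that value in the same mailbox when it has finished
 * initializing.  The 5906 reports completion through a VCPU status
 * bit instead.
 */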
1728 static int tg3_poll_fw(struct tg3 *tp)
1729 {
1730         int i;
1731         u32 val;
1732
1733         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1734                 /* Wait up to 20ms for init done. */
1735                 for (i = 0; i < 200; i++) {
1736                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1737                                 return 0;
1738                         udelay(100);
1739                 }
1740                 return -ENODEV;
1741         }
1742
1743         /* Wait for firmware initialization to complete. */
1744         for (i = 0; i < 100000; i++) {
1745                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1746                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1747                         break;
1748                 udelay(10);
1749         }
1750
1751         /* Chip might not be fitted with firmware.  Some Sun onboard
1752          * parts are configured like that.  So don't signal the timeout
1753          * of the above loop as an error, but do report the lack of
1754          * running firmware once.
1755          */
1756         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1757                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1758
1759                 netdev_info(tp->dev, "No firmware running\n");
1760         }
1761
1762         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1763                 /* The 57765 A0 needs a little more
1764                  * time to do some important work.
1765                  */
1766                 mdelay(10);
1767         }
1768
1769         return 0;
1770 }
1771
1772 static void tg3_link_report(struct tg3 *tp)
1773 {
1774         if (!netif_carrier_ok(tp->dev)) {
1775                 netif_info(tp, link, tp->dev, "Link is down\n");
1776                 tg3_ump_link_report(tp);
1777         } else if (netif_msg_link(tp)) {
1778                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1779                             (tp->link_config.active_speed == SPEED_1000 ?
1780                              1000 :
1781                              (tp->link_config.active_speed == SPEED_100 ?
1782                               100 : 10)),
1783                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1784                              "full" : "half"));
1785
1786                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1787                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1788                             "on" : "off",
1789                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1790                             "on" : "off");
1791
1792                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1793                         netdev_info(tp->dev, "EEE is %s\n",
1794                                     tp->setlpicnt ? "enabled" : "disabled");
1795
1796                 tg3_ump_link_report(tp);
1797         }
1798 }
1799
1800 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1801 {
1802         u16 miireg;
1803
1804         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1805                 miireg = ADVERTISE_1000XPAUSE;
1806         else if (flow_ctrl & FLOW_CTRL_TX)
1807                 miireg = ADVERTISE_1000XPSE_ASYM;
1808         else if (flow_ctrl & FLOW_CTRL_RX)
1809                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1810         else
1811                 miireg = 0;
1812
1813         return miireg;
1814 }
1815
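/* Resolve the flow control outcome of a 1000BASE-X autonegotiation
 * using the standard PAUSE/ASM_DIR rules: symmetric pause when both
 * sides advertise PAUSE; otherwise asymmetric pause is possible only
 * when both sides advertise ASM_DIR, with the direction decided by
 * which side also advertised PAUSE.
 */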
1816 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1817 {
1818         u8 cap = 0;
1819
1820         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1821                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1822         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1823                 if (lcladv & ADVERTISE_1000XPAUSE)
1824                         cap = FLOW_CTRL_RX;
1825                 if (rmtadv & ADVERTISE_1000XPAUSE)
1826                         cap = FLOW_CTRL_TX;
1827         }
1828
1829         return cap;
1830 }
1831
1832 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1833 {
1834         u8 autoneg;
1835         u8 flowctrl = 0;
1836         u32 old_rx_mode = tp->rx_mode;
1837         u32 old_tx_mode = tp->tx_mode;
1838
1839         if (tg3_flag(tp, USE_PHYLIB))
1840                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1841         else
1842                 autoneg = tp->link_config.autoneg;
1843
1844         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1845                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1846                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1847                 else
1848                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1849         } else
1850                 flowctrl = tp->link_config.flowctrl;
1851
1852         tp->link_config.active_flowctrl = flowctrl;
1853
1854         if (flowctrl & FLOW_CTRL_RX)
1855                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1856         else
1857                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1858
1859         if (old_rx_mode != tp->rx_mode)
1860                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1861
1862         if (flowctrl & FLOW_CTRL_TX)
1863                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1864         else
1865                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1866
1867         if (old_tx_mode != tp->tx_mode)
1868                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1869 }
1870
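/* Link-change callback registered with phylib via phy_connect() in
 * tg3_phy_init().  The PHY state machine invokes it whenever link
 * parameters change; it re-derives the MAC port mode, duplex and
 * flow control from the phy_device and reports link transitions.
 */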
1871 static void tg3_adjust_link(struct net_device *dev)
1872 {
1873         u8 oldflowctrl, linkmesg = 0;
1874         u32 mac_mode, lcl_adv, rmt_adv;
1875         struct tg3 *tp = netdev_priv(dev);
1876         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1877
1878         spin_lock_bh(&tp->lock);
1879
1880         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1881                                     MAC_MODE_HALF_DUPLEX);
1882
1883         oldflowctrl = tp->link_config.active_flowctrl;
1884
1885         if (phydev->link) {
1886                 lcl_adv = 0;
1887                 rmt_adv = 0;
1888
1889                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1890                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1891                 else if (phydev->speed == SPEED_1000 ||
1892                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1893                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1894                 else
1895                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1896
1897                 if (phydev->duplex == DUPLEX_HALF)
1898                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1899                 else {
1900                         lcl_adv = mii_advertise_flowctrl(
1901                                   tp->link_config.flowctrl);
1902
1903                         if (phydev->pause)
1904                                 rmt_adv = LPA_PAUSE_CAP;
1905                         if (phydev->asym_pause)
1906                                 rmt_adv |= LPA_PAUSE_ASYM;
1907                 }
1908
1909                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1910         } else
1911                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1912
1913         if (mac_mode != tp->mac_mode) {
1914                 tp->mac_mode = mac_mode;
1915                 tw32_f(MAC_MODE, tp->mac_mode);
1916                 udelay(40);
1917         }
1918
1919         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1920                 if (phydev->speed == SPEED_10)
1921                         tw32(MAC_MI_STAT,
1922                              MAC_MI_STAT_10MBPS_MODE |
1923                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1924                 else
1925                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1926         }
1927
1928         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1929                 tw32(MAC_TX_LENGTHS,
1930                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1931                       (6 << TX_LENGTHS_IPG_SHIFT) |
1932                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1933         else
1934                 tw32(MAC_TX_LENGTHS,
1935                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1936                       (6 << TX_LENGTHS_IPG_SHIFT) |
1937                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1938
1939         if (phydev->link != tp->old_link ||
1940             phydev->speed != tp->link_config.active_speed ||
1941             phydev->duplex != tp->link_config.active_duplex ||
1942             oldflowctrl != tp->link_config.active_flowctrl)
1943                 linkmesg = 1;
1944
1945         tp->old_link = phydev->link;
1946         tp->link_config.active_speed = phydev->speed;
1947         tp->link_config.active_duplex = phydev->duplex;
1948
1949         spin_unlock_bh(&tp->lock);
1950
1951         if (linkmesg)
1952                 tg3_link_report(tp);
1953 }
1954
1955 static int tg3_phy_init(struct tg3 *tp)
1956 {
1957         struct phy_device *phydev;
1958
1959         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1960                 return 0;
1961
1962         /* Bring the PHY back to a known state. */
1963         tg3_bmcr_reset(tp);
1964
1965         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1966
1967         /* Attach the MAC to the PHY. */
1968         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1969                              phydev->dev_flags, phydev->interface);
1970         if (IS_ERR(phydev)) {
1971                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1972                 return PTR_ERR(phydev);
1973         }
1974
1975         /* Mask with MAC supported features. */
1976         switch (phydev->interface) {
1977         case PHY_INTERFACE_MODE_GMII:
1978         case PHY_INTERFACE_MODE_RGMII:
1979                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1980                         phydev->supported &= (PHY_GBIT_FEATURES |
1981                                               SUPPORTED_Pause |
1982                                               SUPPORTED_Asym_Pause);
1983                         break;
1984                 }
1985                 /* fallthru */
1986         case PHY_INTERFACE_MODE_MII:
1987                 phydev->supported &= (PHY_BASIC_FEATURES |
1988                                       SUPPORTED_Pause |
1989                                       SUPPORTED_Asym_Pause);
1990                 break;
1991         default:
1992                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1993                 return -EINVAL;
1994         }
1995
1996         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1997
1998         phydev->advertising = phydev->supported;
1999
2000         return 0;
2001 }
2002
2003 static void tg3_phy_start(struct tg3 *tp)
2004 {
2005         struct phy_device *phydev;
2006
2007         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2008                 return;
2009
2010         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2011
2012         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2013                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2014                 phydev->speed = tp->link_config.speed;
2015                 phydev->duplex = tp->link_config.duplex;
2016                 phydev->autoneg = tp->link_config.autoneg;
2017                 phydev->advertising = tp->link_config.advertising;
2018         }
2019
2020         phy_start(phydev);
2021
2022         phy_start_aneg(phydev);
2023 }
2024
2025 static void tg3_phy_stop(struct tg3 *tp)
2026 {
2027         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2028                 return;
2029
2030         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2031 }
2032
2033 static void tg3_phy_fini(struct tg3 *tp)
2034 {
2035         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2036                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2037                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2038         }
2039 }
2040
2041 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2042 {
2043         int err;
2044         u32 val;
2045
2046         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2047                 return 0;
2048
2049         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2050                 /* Cannot do read-modify-write on 5401 */
2051                 err = tg3_phy_auxctl_write(tp,
2052                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2053                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2054                                            0x4c20);
2055                 goto done;
2056         }
2057
2058         err = tg3_phy_auxctl_read(tp,
2059                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2060         if (err)
2061                 return err;
2062
2063         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2064         err = tg3_phy_auxctl_write(tp,
2065                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2066
2067 done:
2068         return err;
2069 }
2070
2071 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2072 {
2073         u32 phytest;
2074
2075         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2076                 u32 phy;
2077
2078                 tg3_writephy(tp, MII_TG3_FET_TEST,
2079                              phytest | MII_TG3_FET_SHADOW_EN);
2080                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2081                         if (enable)
2082                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2083                         else
2084                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2085                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2086                 }
2087                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2088         }
2089 }
2090
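/* Auto power-down (APD) lets the PHY enter a low-power state while
 * the link is down, using the 84 ms wake timer programmed below to
 * periodically check for a link partner.  FET-style PHYs configure
 * this through a shadow register instead.
 */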
2091 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2092 {
2093         u32 reg;
2094
2095         if (!tg3_flag(tp, 5705_PLUS) ||
2096             (tg3_flag(tp, 5717_PLUS) &&
2097              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2098                 return;
2099
2100         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2101                 tg3_phy_fet_toggle_apd(tp, enable);
2102                 return;
2103         }
2104
2105         reg = MII_TG3_MISC_SHDW_WREN |
2106               MII_TG3_MISC_SHDW_SCR5_SEL |
2107               MII_TG3_MISC_SHDW_SCR5_LPED |
2108               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2109               MII_TG3_MISC_SHDW_SCR5_SDTL |
2110               MII_TG3_MISC_SHDW_SCR5_C125OE;
2111         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2112                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2113
2114         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2115
2117         reg = MII_TG3_MISC_SHDW_WREN |
2118               MII_TG3_MISC_SHDW_APD_SEL |
2119               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2120         if (enable)
2121                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2122
2123         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2124 }
2125
2126 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2127 {
2128         u32 phy;
2129
2130         if (!tg3_flag(tp, 5705_PLUS) ||
2131             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2132                 return;
2133
2134         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2135                 u32 ephy;
2136
2137                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2138                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2139
2140                         tg3_writephy(tp, MII_TG3_FET_TEST,
2141                                      ephy | MII_TG3_FET_SHADOW_EN);
2142                         if (!tg3_readphy(tp, reg, &phy)) {
2143                                 if (enable)
2144                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2145                                 else
2146                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2147                                 tg3_writephy(tp, reg, phy);
2148                         }
2149                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2150                 }
2151         } else {
2152                 int ret;
2153
2154                 ret = tg3_phy_auxctl_read(tp,
2155                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2156                 if (!ret) {
2157                         if (enable)
2158                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2159                         else
2160                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2161                         tg3_phy_auxctl_write(tp,
2162                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2163                 }
2164         }
2165 }
2166
2167 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2168 {
2169         int ret;
2170         u32 val;
2171
2172         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2173                 return;
2174
2175         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2176         if (!ret)
2177                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2178                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2179 }
2180
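/* Program PHY DSP coefficients from the chip's one-time programmable
 * (OTP) memory: AGC target, high-pass/low-pass filter trims, VDAC and
 * 10BASE-T amplitude values.  tp->phy_otp is zero when no OTP data
 * was found, making this a no-op.
 */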
2181 static void tg3_phy_apply_otp(struct tg3 *tp)
2182 {
2183         u32 otp, phy;
2184
2185         if (!tp->phy_otp)
2186                 return;
2187
2188         otp = tp->phy_otp;
2189
2190         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2191                 return;
2192
2193         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2194         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2195         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2196
2197         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2198               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2199         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2200
2201         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2202         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2203         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2204
2205         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2206         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2207
2208         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2209         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2210
2211         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2212               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2213         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2214
2215         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2216 }
2217
2218 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2219 {
2220         u32 val;
2221
2222         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2223                 return;
2224
2225         tp->setlpicnt = 0;
2226
2227         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2228             current_link_up == 1 &&
2229             tp->link_config.active_duplex == DUPLEX_FULL &&
2230             (tp->link_config.active_speed == SPEED_100 ||
2231              tp->link_config.active_speed == SPEED_1000)) {
2232                 u32 eeectl;
2233
2234                 if (tp->link_config.active_speed == SPEED_1000)
2235                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2236                 else
2237                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2238
2239                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2240
2241                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2242                                   TG3_CL45_D7_EEERES_STAT, &val);
2243
2244                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2245                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2246                         tp->setlpicnt = 2;
2247         }
2248
2249         if (!tp->setlpicnt) {
2250                 if (current_link_up == 1 &&
2251                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2252                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2253                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2254                 }
2255
2256                 val = tr32(TG3_CPMU_EEE_MODE);
2257                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2258         }
2259 }
2260
2261 static void tg3_phy_eee_enable(struct tg3 *tp)
2262 {
2263         u32 val;
2264
2265         if (tp->link_config.active_speed == SPEED_1000 &&
2266             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2267              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2268              tg3_flag(tp, 57765_CLASS)) &&
2269             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2270                 val = MII_TG3_DSP_TAP26_ALNOKO |
2271                       MII_TG3_DSP_TAP26_RMRXSTO;
2272                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2273                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2274         }
2275
2276         val = tr32(TG3_CPMU_EEE_MODE);
2277         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2278 }
2279
2280 static int tg3_wait_macro_done(struct tg3 *tp)
2281 {
2282         int limit = 100;
2283
2284         while (limit--) {
2285                 u32 tmp32;
2286
2287                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2288                         if ((tmp32 & 0x1000) == 0)
2289                                 break;
2290                 }
2291         }
2292         if (limit < 0)
2293                 return -EBUSY;
2294
2295         return 0;
2296 }
2297
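/* Write a known test pattern into each of the four DSP channels and
 * read it back.  A macro-done timeout sets *resetp so the caller,
 * tg3_phy_reset_5703_4_5(), resets the PHY before retrying; a data
 * mismatch just fails the attempt with -EBUSY.
 */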
2298 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2299 {
2300         static const u32 test_pat[4][6] = {
2301         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2302         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2303         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2304         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2305         };
2306         int chan;
2307
2308         for (chan = 0; chan < 4; chan++) {
2309                 int i;
2310
2311                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2312                              (chan * 0x2000) | 0x0200);
2313                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2314
2315                 for (i = 0; i < 6; i++)
2316                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2317                                      test_pat[chan][i]);
2318
2319                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2320                 if (tg3_wait_macro_done(tp)) {
2321                         *resetp = 1;
2322                         return -EBUSY;
2323                 }
2324
2325                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2326                              (chan * 0x2000) | 0x0200);
2327                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2328                 if (tg3_wait_macro_done(tp)) {
2329                         *resetp = 1;
2330                         return -EBUSY;
2331                 }
2332
2333                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2334                 if (tg3_wait_macro_done(tp)) {
2335                         *resetp = 1;
2336                         return -EBUSY;
2337                 }
2338
2339                 for (i = 0; i < 6; i += 2) {
2340                         u32 low, high;
2341
2342                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2343                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2344                             tg3_wait_macro_done(tp)) {
2345                                 *resetp = 1;
2346                                 return -EBUSY;
2347                         }
2348                         low &= 0x7fff;
2349                         high &= 0x000f;
2350                         if (low != test_pat[chan][i] ||
2351                             high != test_pat[chan][i+1]) {
2352                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2353                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2354                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2355
2356                                 return -EBUSY;
2357                         }
2358                 }
2359         }
2360
2361         return 0;
2362 }
2363
2364 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2365 {
2366         int chan;
2367
2368         for (chan = 0; chan < 4; chan++) {
2369                 int i;
2370
2371                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2372                              (chan * 0x2000) | 0x0200);
2373                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2374                 for (i = 0; i < 6; i++)
2375                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2376                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2377                 if (tg3_wait_macro_done(tp))
2378                         return -EBUSY;
2379         }
2380
2381         return 0;
2382 }
2383
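/* PHY workaround for the 5703/5704/5705: force 1000 Mbps full-duplex
 * master mode, verify the DSP channels with a test pattern (retrying
 * with fresh PHY resets, up to ten attempts), then clear the test
 * pattern and restore the original MII_TG3_EXT_CTRL and MII_CTRL1000
 * settings.
 */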
2384 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2385 {
2386         u32 reg32, phy9_orig;
2387         int retries, do_phy_reset, err;
2388
2389         retries = 10;
2390         do_phy_reset = 1;
2391         do {
2392                 if (do_phy_reset) {
2393                         err = tg3_bmcr_reset(tp);
2394                         if (err)
2395                                 return err;
2396                         do_phy_reset = 0;
2397                 }
2398
2399                 /* Disable transmitter and interrupt.  */
2400                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2401                         continue;
2402
2403                 reg32 |= 0x3000;
2404                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2405
2406                 /* Set full-duplex, 1000 Mbps.  */
2407                 tg3_writephy(tp, MII_BMCR,
2408                              BMCR_FULLDPLX | BMCR_SPEED1000);
2409
2410                 /* Set to master mode.  */
2411                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2412                         continue;
2413
2414                 tg3_writephy(tp, MII_CTRL1000,
2415                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2416
2417                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2418                 if (err)
2419                         return err;
2420
2421                 /* Block the PHY control access.  */
2422                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2423
2424                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2425                 if (!err)
2426                         break;
2427         } while (--retries);
2428
2429         err = tg3_phy_reset_chanpat(tp);
2430         if (err)
2431                 return err;
2432
2433         tg3_phydsp_write(tp, 0x8005, 0x0000);
2434
2435         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2436         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2437
2438         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439
2440         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2441
2442         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2443                 reg32 &= ~0x3000;
2444                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2445         } else if (!err)
2446                 err = -EBUSY;
2447
2448         return err;
2449 }
2450
2451 /* Reset the tigon3 PHY, applying the chip-specific workarounds
2452  * needed to bring it back to a fully operational state.
2453  */
2454 static int tg3_phy_reset(struct tg3 *tp)
2455 {
2456         u32 val, cpmuctrl;
2457         int err;
2458
2459         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2460                 val = tr32(GRC_MISC_CFG);
2461                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2462                 udelay(40);
2463         }
2464         err  = tg3_readphy(tp, MII_BMSR, &val);
2465         err |= tg3_readphy(tp, MII_BMSR, &val);
2466         if (err != 0)
2467                 return -EBUSY;
2468
2469         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2470                 netif_carrier_off(tp->dev);
2471                 tg3_link_report(tp);
2472         }
2473
2474         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2476             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2477                 err = tg3_phy_reset_5703_4_5(tp);
2478                 if (err)
2479                         return err;
2480                 goto out;
2481         }
2482
2483         cpmuctrl = 0;
2484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2485             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2486                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2487                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2488                         tw32(TG3_CPMU_CTRL,
2489                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2490         }
2491
2492         err = tg3_bmcr_reset(tp);
2493         if (err)
2494                 return err;
2495
2496         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2497                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2498                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2499
2500                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2501         }
2502
2503         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2504             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2505                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2506                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2507                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2508                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2509                         udelay(40);
2510                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2511                 }
2512         }
2513
2514         if (tg3_flag(tp, 5717_PLUS) &&
2515             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2516                 return 0;
2517
2518         tg3_phy_apply_otp(tp);
2519
2520         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2521                 tg3_phy_toggle_apd(tp, true);
2522         else
2523                 tg3_phy_toggle_apd(tp, false);
2524
2525 out:
2526         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2527             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2528                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2529                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2530                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2531         }
2532
2533         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2534                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2535                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2536         }
2537
2538         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2539                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2540                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2541                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2542                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2543                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2544                 }
2545         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2546                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2547                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2548                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2549                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2550                                 tg3_writephy(tp, MII_TG3_TEST1,
2551                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2552                         } else
2553                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2554
2555                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2556                 }
2557         }
2558
2559         /* Set the extended packet length bit (bit 14) on all chips
2560          * that support jumbo frames.  */
2561         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2562                 /* Cannot do read-modify-write on 5401 */
2563                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2564         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2565                 /* Set bit 14 with read-modify-write to preserve other bits */
2566                 err = tg3_phy_auxctl_read(tp,
2567                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2568                 if (!err)
2569                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2570                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2571         }
2572
2573         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2574         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2575          * jumbo frame transmission.
2576         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2577                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2578                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2579                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2580         }
2581
2582         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2583                 /* adjust output voltage */
2584                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2585         }
2586
2587         tg3_phy_toggle_automdix(tp, 1);
2588         tg3_phy_set_wirespeed(tp);
2589         return 0;
2590 }
2591
2592 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2593 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2594 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2595                                           TG3_GPIO_MSG_NEED_VAUX)
2596 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2597         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2598          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2599          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2600          (TG3_GPIO_MSG_DRVR_PRES << 12))
2601
2602 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2603         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2604          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2605          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2606          (TG3_GPIO_MSG_NEED_VAUX << 12))
2607
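/* Each of the four PCI functions owns a 4-bit field in the shared
 * GPIO message word (hence the << 0/4/8/12 replication in the masks
 * above).  Update only the calling function's field, then return the
 * whole word shifted down so the caller can inspect every function's
 * DRVR_PRES/NEED_VAUX bits at once.
 */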
2608 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2609 {
2610         u32 status, shift;
2611
2612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2613             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2614                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2615         else
2616                 status = tr32(TG3_CPMU_DRV_STATUS);
2617
2618         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2619         status &= ~(TG3_GPIO_MSG_MASK << shift);
2620         status |= (newstat << shift);
2621
2622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2623             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2624                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2625         else
2626                 tw32(TG3_CPMU_DRV_STATUS, status);
2627
2628         return status >> TG3_APE_GPIO_MSG_SHIFT;
2629 }
2630
2631 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2632 {
2633         if (!tg3_flag(tp, IS_NIC))
2634                 return 0;
2635
2636         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2637             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2638             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2639                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2640                         return -EIO;
2641
2642                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2643
2644                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2645                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2646
2647                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2648         } else {
2649                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2650                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2651         }
2652
2653         return 0;
2654 }
2655
2656 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2657 {
2658         u32 grc_local_ctrl;
2659
2660         if (!tg3_flag(tp, IS_NIC) ||
2661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2662             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2663                 return;
2664
2665         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2666
2667         tw32_wait_f(GRC_LOCAL_CTRL,
2668                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2669                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2670
2671         tw32_wait_f(GRC_LOCAL_CTRL,
2672                     grc_local_ctrl,
2673                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2674
2675         tw32_wait_f(GRC_LOCAL_CTRL,
2676                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2677                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2678 }
2679
2680 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2681 {
2682         if (!tg3_flag(tp, IS_NIC))
2683                 return;
2684
2685         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2687                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2688                             (GRC_LCLCTRL_GPIO_OE0 |
2689                              GRC_LCLCTRL_GPIO_OE1 |
2690                              GRC_LCLCTRL_GPIO_OE2 |
2691                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2692                              GRC_LCLCTRL_GPIO_OUTPUT1),
2693                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2694         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2695                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2696                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2697                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2698                                      GRC_LCLCTRL_GPIO_OE1 |
2699                                      GRC_LCLCTRL_GPIO_OE2 |
2700                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2701                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2702                                      tp->grc_local_ctrl;
2703                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2704                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2705
2706                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2707                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2708                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2709
2710                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2711                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2712                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2713         } else {
2714                 u32 no_gpio2;
2715                 u32 grc_local_ctrl = 0;
2716
2717                 /* Workaround to prevent excessive current draw. */
2718                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2719                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2720                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2721                                     grc_local_ctrl,
2722                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2723                 }
2724
2725                 /* On 5753 and variants, GPIO2 cannot be used. */
2726                 no_gpio2 = tp->nic_sram_data_cfg &
2727                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2728
2729                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2730                                   GRC_LCLCTRL_GPIO_OE1 |
2731                                   GRC_LCLCTRL_GPIO_OE2 |
2732                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2733                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2734                 if (no_gpio2) {
2735                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2736                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2737                 }
2738                 tw32_wait_f(GRC_LOCAL_CTRL,
2739                             tp->grc_local_ctrl | grc_local_ctrl,
2740                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2741
2742                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2743
2744                 tw32_wait_f(GRC_LOCAL_CTRL,
2745                             tp->grc_local_ctrl | grc_local_ctrl,
2746                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2747
2748                 if (!no_gpio2) {
2749                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2750                         tw32_wait_f(GRC_LOCAL_CTRL,
2751                                     tp->grc_local_ctrl | grc_local_ctrl,
2752                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2753                 }
2754         }
2755 }
2756
2757 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2758 {
2759         u32 msg = 0;
2760
2761         /* Serialize power state transitions */
2762         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2763                 return;
2764
2765         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2766                 msg = TG3_GPIO_MSG_NEED_VAUX;
2767
2768         msg = tg3_set_function_status(tp, msg);
2769
2770         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2771                 goto done;
2772
2773         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2774                 tg3_pwrsrc_switch_to_vaux(tp);
2775         else
2776                 tg3_pwrsrc_die_with_vmain(tp);
2777
2778 done:
2779         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2780 }
2781
2782 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2783 {
2784         bool need_vaux = false;
2785
2786         /* The GPIOs do something completely different on 57765. */
2787         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2788                 return;
2789
2790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2792             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2793                 tg3_frob_aux_power_5717(tp, include_wol ?
2794                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2795                 return;
2796         }
2797
2798         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2799                 struct net_device *dev_peer;
2800
2801                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2802
2803                 /* remove_one() may have been run on the peer. */
2804                 if (dev_peer) {
2805                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2806
2807                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2808                                 return;
2809
2810                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2811                             tg3_flag(tp_peer, ENABLE_ASF))
2812                                 need_vaux = true;
2813                 }
2814         }
2815
2816         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2817             tg3_flag(tp, ENABLE_ASF))
2818                 need_vaux = true;
2819
2820         if (need_vaux)
2821                 tg3_pwrsrc_switch_to_vaux(tp);
2822         else
2823                 tg3_pwrsrc_die_with_vmain(tp);
2824 }
2825
2826 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2827 {
2828         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2829                 return 1;
2830         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2831                 if (speed != SPEED_10)
2832                         return 1;
2833         } else if (speed == SPEED_10)
2834                 return 1;
2835
2836         return 0;
2837 }
2838
2839 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2840 {
2841         u32 val;
2842
2843         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2844                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2845                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2846                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2847
2848                         sg_dig_ctrl |=
2849                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2850                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2851                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2852                 }
2853                 return;
2854         }
2855
2856         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2857                 tg3_bmcr_reset(tp);
2858                 val = tr32(GRC_MISC_CFG);
2859                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2860                 udelay(40);
2861                 return;
2862         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2863                 u32 phytest;
2864                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2865                         u32 phy;
2866
2867                         tg3_writephy(tp, MII_ADVERTISE, 0);
2868                         tg3_writephy(tp, MII_BMCR,
2869                                      BMCR_ANENABLE | BMCR_ANRESTART);
2870
2871                         tg3_writephy(tp, MII_TG3_FET_TEST,
2872                                      phytest | MII_TG3_FET_SHADOW_EN);
2873                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2874                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2875                                 tg3_writephy(tp,
2876                                              MII_TG3_FET_SHDW_AUXMODE4,
2877                                              phy);
2878                         }
2879                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2880                 }
2881                 return;
2882         } else if (do_low_power) {
2883                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2884                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2885
2886                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2887                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2888                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2889                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2890         }
2891
2892         /* The PHY should not be powered down on some chips because
2893          * of bugs.
2894          */
2895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2896             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2897             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2898              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2899             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2900              !tp->pci_fn))
2901                 return;
2902
2903         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2904             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2905                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2906                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2907                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2908                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2909         }
2910
2911         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2912 }
2913
2914 /* tp->lock is held. */
2915 static int tg3_nvram_lock(struct tg3 *tp)
2916 {
2917         if (tg3_flag(tp, NVRAM)) {
2918                 int i;
2919
2920                 if (tp->nvram_lock_cnt == 0) {
2921                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2922                         for (i = 0; i < 8000; i++) {
2923                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2924                                         break;
2925                                 udelay(20);
2926                         }
2927                         if (i == 8000) {
2928                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2929                                 return -ENODEV;
2930                         }
2931                 }
2932                 tp->nvram_lock_cnt++;
2933         }
2934         return 0;
2935 }
2936
2937 /* tp->lock is held. */
2938 static void tg3_nvram_unlock(struct tg3 *tp)
2939 {
2940         if (tg3_flag(tp, NVRAM)) {
2941                 if (tp->nvram_lock_cnt > 0)
2942                         tp->nvram_lock_cnt--;
2943                 if (tp->nvram_lock_cnt == 0)
2944                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2945         }
2946 }
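/* Usage sketch (an illustration, assuming tp->lock is held as noted
 * above): the SWARB grant is reference counted via nvram_lock_cnt, so
 * nested lock/unlock pairs are safe and only the outermost pair
 * touches the hardware arbiter.
 *
 *	if (tg3_nvram_lock(tp))
 *		return -ENODEV;
 *	... access NVRAM registers ...
 *	tg3_nvram_unlock(tp);
 */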
2947
2948 /* tp->lock is held. */
2949 static void tg3_enable_nvram_access(struct tg3 *tp)
2950 {
2951         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2952                 u32 nvaccess = tr32(NVRAM_ACCESS);
2953
2954                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2955         }
2956 }
2957
2958 /* tp->lock is held. */
2959 static void tg3_disable_nvram_access(struct tg3 *tp)
2960 {
2961         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2962                 u32 nvaccess = tr32(NVRAM_ACCESS);
2963
2964                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2965         }
2966 }
2967
2968 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2969                                         u32 offset, u32 *val)
2970 {
2971         u32 tmp;
2972         int i;
2973
2974         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2975                 return -EINVAL;
2976
2977         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2978                                         EEPROM_ADDR_DEVID_MASK |
2979                                         EEPROM_ADDR_READ);
2980         tw32(GRC_EEPROM_ADDR,
2981              tmp |
2982              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2983              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2984               EEPROM_ADDR_ADDR_MASK) |
2985              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2986
2987         for (i = 0; i < 1000; i++) {
2988                 tmp = tr32(GRC_EEPROM_ADDR);
2989
2990                 if (tmp & EEPROM_ADDR_COMPLETE)
2991                         break;
2992                 msleep(1);
2993         }
2994         if (!(tmp & EEPROM_ADDR_COMPLETE))
2995                 return -EBUSY;
2996
2997         tmp = tr32(GRC_EEPROM_DATA);
2998
2999         /*
3000          * The data will always be opposite the native endian
3001          * format.  Perform a blind byteswap to compensate.
3002          */
3003         *val = swab32(tmp);
3004
3005         return 0;
3006 }
3007
3008 #define NVRAM_CMD_TIMEOUT 10000
3009
3010 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3011 {
3012         int i;
3013
3014         tw32(NVRAM_CMD, nvram_cmd);
3015         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3016                 udelay(10);
3017                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3018                         udelay(10);
3019                         break;
3020                 }
3021         }
3022
3023         if (i == NVRAM_CMD_TIMEOUT)
3024                 return -EBUSY;
3025
3026         return 0;
3027 }
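/* Illustration only: commands are bitwise compositions of NVRAM_CMD_*
 * flags, e.g. the single-word read issued by tg3_nvram_read() below:
 *
 *	tw32(NVRAM_ADDR, offset);
 *	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 *		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
 */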
3028
3029 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3030 {
3031         if (tg3_flag(tp, NVRAM) &&
3032             tg3_flag(tp, NVRAM_BUFFERED) &&
3033             tg3_flag(tp, FLASH) &&
3034             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3035             (tp->nvram_jedecnum == JEDEC_ATMEL))
3036
3037                 addr = ((addr / tp->nvram_pagesize) <<
3038                         ATMEL_AT45DB0X1B_PAGE_POS) +
3039                        (addr % tp->nvram_pagesize);
3040
3041         return addr;
3042 }
3043
3044 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3045 {
3046         if (tg3_flag(tp, NVRAM) &&
3047             tg3_flag(tp, NVRAM_BUFFERED) &&
3048             tg3_flag(tp, FLASH) &&
3049             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3050             (tp->nvram_jedecnum == JEDEC_ATMEL))
3051
3052                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3053                         tp->nvram_pagesize) +
3054                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3055
3056         return addr;
3057 }
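/* Worked example for the two translations above (a sketch, assuming
 * the 264-byte-page Atmel AT45DB0x1B parts this targets, with
 * ATMEL_AT45DB0X1B_PAGE_POS == 9):
 *
 *	logical 528 = page 2, offset 0
 *	physical    = ((528 / 264) << 9) + (528 % 264) = 0x400
 *
 * and tg3_nvram_logical_addr() inverts it:
 *
 *	(0x400 >> 9) * 264 + (0x400 & 511) = 528
 */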
3058
3059 /* NOTE: Data read in from NVRAM is byteswapped according to
3060  * the byteswapping settings for all other register accesses.
3061  * tg3 devices are BE devices, so on a BE machine, the data
3062  * returned will be exactly as it is seen in NVRAM.  On a LE
3063  * machine, the 32-bit value will be byteswapped.
3064  */
3065 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3066 {
3067         int ret;
3068
3069         if (!tg3_flag(tp, NVRAM))
3070                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3071
3072         offset = tg3_nvram_phys_addr(tp, offset);
3073
3074         if (offset > NVRAM_ADDR_MSK)
3075                 return -EINVAL;
3076
3077         ret = tg3_nvram_lock(tp);
3078         if (ret)
3079                 return ret;
3080
3081         tg3_enable_nvram_access(tp);
3082
3083         tw32(NVRAM_ADDR, offset);
3084         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3085                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3086
3087         if (ret == 0)
3088                 *val = tr32(NVRAM_RDDATA);
3089
3090         tg3_disable_nvram_access(tp);
3091
3092         tg3_nvram_unlock(tp);
3093
3094         return ret;
3095 }
3096
3097 /* Ensures NVRAM data is in bytestream format. */
3098 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3099 {
3100         u32 v;
3101         int res = tg3_nvram_read(tp, offset, &v);
3102         if (!res)
3103                 *val = cpu_to_be32(v);
3104         return res;
3105 }
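/* Usage sketch: because the result is __be32, callers that want raw
 * NVRAM bytes can memcpy() it out regardless of host endianness
 * (buf and offset here are hypothetical):
 *
 *	__be32 v;
 *	if (!tg3_nvram_read_be32(tp, offset, &v))
 *		memcpy(buf, &v, sizeof(v));
 */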
3106
3107 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3108                                     u32 offset, u32 len, u8 *buf)
3109 {
3110         int i, j, rc = 0;
3111         u32 val;
3112
3113         for (i = 0; i < len; i += 4) {
3114                 u32 addr;
3115                 __be32 data;
3116
3117                 addr = offset + i;
3118
3119                 memcpy(&data, buf + i, 4);
3120
3121                 /*
3122                  * The SEEPROM interface expects the data to always be opposite
3123                  * the native endian format.  We accomplish this by reversing
3124                  * all the operations that would have been performed on the
3125                  * data from a call to tg3_nvram_read_be32().
3126                  */
3127                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3128
3129                 val = tr32(GRC_EEPROM_ADDR);
3130                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3131
3132                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3133                         EEPROM_ADDR_READ);
3134                 tw32(GRC_EEPROM_ADDR, val |
3135                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3136                         (addr & EEPROM_ADDR_ADDR_MASK) |
3137                         EEPROM_ADDR_START |
3138                         EEPROM_ADDR_WRITE);
3139
3140                 for (j = 0; j < 1000; j++) {
3141                         val = tr32(GRC_EEPROM_ADDR);
3142
3143                         if (val & EEPROM_ADDR_COMPLETE)
3144                                 break;
3145                         msleep(1);
3146                 }
3147                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3148                         rc = -EBUSY;
3149                         break;
3150                 }
3151         }
3152
3153         return rc;
3154 }
3155
3156 /* offset and length are dword aligned */
3157 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3158                 u8 *buf)
3159 {
3160         int ret = 0;
3161         u32 pagesize = tp->nvram_pagesize;
3162         u32 pagemask = pagesize - 1;
3163         u32 nvram_cmd;
3164         u8 *tmp;
3165
3166         tmp = kmalloc(pagesize, GFP_KERNEL);
3167         if (tmp == NULL)
3168                 return -ENOMEM;
3169
3170         while (len) {
3171                 int j;
3172                 u32 phy_addr, page_off, size;
3173
3174                 phy_addr = offset & ~pagemask;
3175
3176                 for (j = 0; j < pagesize; j += 4) {
3177                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3178                                                   (__be32 *) (tmp + j));
3179                         if (ret)
3180                                 break;
3181                 }
3182                 if (ret)
3183                         break;
3184
3185                 page_off = offset & pagemask;
3186                 size = pagesize;
3187                 if (len < size)
3188                         size = len;
3189
3190                 len -= size;
3191
3192                 memcpy(tmp + page_off, buf, size);
3193
3194                 offset = offset + (pagesize - page_off);
3195
3196                 tg3_enable_nvram_access(tp);
3197
3198                 /*
3199                  * Before we can erase the flash page, we need
3200                  * to issue a special "write enable" command.
3201                  */
3202                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3203
3204                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3205                         break;
3206
3207                 /* Erase the target page */
3208                 tw32(NVRAM_ADDR, phy_addr);
3209
3210                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3211                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3212
3213                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3214                         break;
3215
3216                 /* Issue another write enable to start the write. */
3217                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3218
3219                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3220                         break;
3221
3222                 for (j = 0; j < pagesize; j += 4) {
3223                         __be32 data;
3224
3225                         data = *((__be32 *) (tmp + j));
3226
3227                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3228
3229                         tw32(NVRAM_ADDR, phy_addr + j);
3230
3231                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3232                                 NVRAM_CMD_WR;
3233
3234                         if (j == 0)
3235                                 nvram_cmd |= NVRAM_CMD_FIRST;
3236                         else if (j == (pagesize - 4))
3237                                 nvram_cmd |= NVRAM_CMD_LAST;
3238
3239                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3240                         if (ret)
3241                                 break;
3242                 }
3243                 if (ret)
3244                         break;
3245         }
3246
3247         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3248         tg3_nvram_exec_cmd(tp, nvram_cmd);
3249
3250         kfree(tmp);
3251
3252         return ret;
3253 }
3254
3255 /* offset and length are dword aligned */
3256 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3257                 u8 *buf)
3258 {
3259         int i, ret = 0;
3260
3261         for (i = 0; i < len; i += 4, offset += 4) {
3262                 u32 page_off, phy_addr, nvram_cmd;
3263                 __be32 data;
3264
3265                 memcpy(&data, buf + i, 4);
3266                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3267
3268                 page_off = offset % tp->nvram_pagesize;
3269
3270                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3271
3272                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3273
3274                 if (page_off == 0 || i == 0)
3275                         nvram_cmd |= NVRAM_CMD_FIRST;
3276                 if (page_off == (tp->nvram_pagesize - 4))
3277                         nvram_cmd |= NVRAM_CMD_LAST;
3278
3279                 if (i == (len - 4))
3280                         nvram_cmd |= NVRAM_CMD_LAST;
3281
3282                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3283                     !tg3_flag(tp, FLASH) ||
3284                     !tg3_flag(tp, 57765_PLUS))
3285                         tw32(NVRAM_ADDR, phy_addr);
3286
3287                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3288                     !tg3_flag(tp, 5755_PLUS) &&
3289                     (tp->nvram_jedecnum == JEDEC_ST) &&
3290                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3291                         u32 cmd;
3292
3293                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3294                         ret = tg3_nvram_exec_cmd(tp, cmd);
3295                         if (ret)
3296                                 break;
3297                 }
3298                 if (!tg3_flag(tp, FLASH)) {
3299                         /* We always do complete word writes to eeprom. */
3300                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3301                 }
3302
3303                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3304                 if (ret)
3305                         break;
3306         }
3307         return ret;
3308 }
3309
3310 /* offset and length are dword aligned */
3311 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3312 {
3313         int ret;
3314
3315         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3316                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3317                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3318                 udelay(40);
3319         }
3320
3321         if (!tg3_flag(tp, NVRAM)) {
3322                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3323         } else {
3324                 u32 grc_mode;
3325
3326                 ret = tg3_nvram_lock(tp);
3327                 if (ret)
3328                         return ret;
3329
3330                 tg3_enable_nvram_access(tp);
3331                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3332                         tw32(NVRAM_WRITE1, 0x406);
3333
3334                 grc_mode = tr32(GRC_MODE);
3335                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3336
3337                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3338                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3339                                 buf);
3340                 } else {
3341                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3342                                 buf);
3343                 }
3344
3345                 grc_mode = tr32(GRC_MODE);
3346                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3347
3348                 tg3_disable_nvram_access(tp);
3349                 tg3_nvram_unlock(tp);
3350         }
3351
3352         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3353                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3354                 udelay(40);
3355         }
3356
3357         return ret;
3358 }
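/* A sketch of how a caller honors the dword-alignment contract noted
 * above when handed a misaligned start (roughly what the driver's
 * ethtool set_eeprom path does; names are illustrative):
 *
 *	__be32 start;
 *	u32 b_offset = offset & 3;
 *	if (b_offset) {
 *		ret = tg3_nvram_read_be32(tp, offset - b_offset, &start);
 *		... merge the leading bytes of start ahead of buf ...
 *	}
 */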
3359
3360 #define RX_CPU_SCRATCH_BASE     0x30000
3361 #define RX_CPU_SCRATCH_SIZE     0x04000
3362 #define TX_CPU_SCRATCH_BASE     0x34000
3363 #define TX_CPU_SCRATCH_SIZE     0x04000
3364
3365 /* tp->lock is held. */
3366 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3367 {
3368         int i;
3369
3370         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3371
3372         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3373                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3374
3375                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3376                 return 0;
3377         }
3378         if (offset == RX_CPU_BASE) {
3379                 for (i = 0; i < 10000; i++) {
3380                         tw32(offset + CPU_STATE, 0xffffffff);
3381                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3382                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3383                                 break;
3384                 }
3385
3386                 tw32(offset + CPU_STATE, 0xffffffff);
3387                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3388                 udelay(10);
3389         } else {
3390                 for (i = 0; i < 10000; i++) {
3391                         tw32(offset + CPU_STATE, 0xffffffff);
3392                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3393                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3394                                 break;
3395                 }
3396         }
3397
3398         if (i >= 10000) {
3399                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3400                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3401                 return -ENODEV;
3402         }
3403
3404         /* Clear firmware's nvram arbitration. */
3405         if (tg3_flag(tp, NVRAM))
3406                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3407         return 0;
3408 }
3409
3410 struct fw_info {
3411         unsigned int fw_base;
3412         unsigned int fw_len;
3413         const __be32 *fw_data;
3414 };
3415
3416 /* tp->lock is held. */
3417 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3418                                  u32 cpu_scratch_base, int cpu_scratch_size,
3419                                  struct fw_info *info)
3420 {
3421         int err, lock_err, i;
3422         void (*write_op)(struct tg3 *, u32, u32);
3423
3424         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3425                 netdev_err(tp->dev,
3426                            "%s: attempted to load TX cpu firmware on a 5705-class chip\n",
3427                            __func__);
3428                 return -EINVAL;
3429         }
3430
3431         if (tg3_flag(tp, 5705_PLUS))
3432                 write_op = tg3_write_mem;
3433         else
3434                 write_op = tg3_write_indirect_reg32;
3435
3436         /* It is possible that bootcode is still loading at this point.
3437          * Grab the nvram lock before halting the cpu.
3438          */
3439         lock_err = tg3_nvram_lock(tp);
3440         err = tg3_halt_cpu(tp, cpu_base);
3441         if (!lock_err)
3442                 tg3_nvram_unlock(tp);
3443         if (err)
3444                 goto out;
3445
3446         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3447                 write_op(tp, cpu_scratch_base + i, 0);
3448         tw32(cpu_base + CPU_STATE, 0xffffffff);
3449         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3450         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3451                 write_op(tp, (cpu_scratch_base +
3452                               (info->fw_base & 0xffff) +
3453                               (i * sizeof(u32))),
3454                               be32_to_cpu(info->fw_data[i]));
3455
3456         err = 0;
3457
3458 out:
3459         return err;
3460 }
3461
3462 /* tp->lock is held. */
3463 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3464 {
3465         struct fw_info info;
3466         const __be32 *fw_data;
3467         int err, i;
3468
3469         fw_data = (void *)tp->fw->data;
3470
3471         /* Firmware blob starts with version numbers, followed by the
3472          * start address and length.  We set the complete length:
3473          * length = end_address_of_bss - start_address_of_text.
3474          * The remainder is the blob, loaded contiguously from the
3475          * start address. */
3476
3477         info.fw_base = be32_to_cpu(fw_data[1]);
3478         info.fw_len = tp->fw->size - 12;
3479         info.fw_data = &fw_data[3];
3480
3481         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3482                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3483                                     &info);
3484         if (err)
3485                 return err;
3486
3487         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3488                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3489                                     &info);
3490         if (err)
3491                 return err;
3492
3493         /* Now startup only the RX cpu. */
3494         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3495         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3496
3497         for (i = 0; i < 5; i++) {
3498                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3499                         break;
3500                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3501                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3502                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3503                 udelay(1000);
3504         }
3505         if (i >= 5) {
3506                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3507                            "should be %08x\n", __func__,
3508                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3509                 return -ENODEV;
3510         }
3511         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3512         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3513
3514         return 0;
3515 }
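/* Layout of the firmware blob consumed above (a sketch inferred from
 * the comment and offsets in this file, not an authoritative format
 * description):
 *
 *	fw_data[0]	version numbers
 *	fw_data[1]	start (load) address -> info.fw_base
 *	fw_data[2]	length (end_address_of_bss - start_address_of_text)
 *	fw_data[3..]	image, loaded contiguously from fw_base
 *
 * hence info.fw_len = tp->fw->size - 12 skips the three header words.
 */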
3516
3517 /* tp->lock is held. */
3518 static int tg3_load_tso_firmware(struct tg3 *tp)
3519 {
3520         struct fw_info info;
3521         const __be32 *fw_data;
3522         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3523         int err, i;
3524
3525         if (tg3_flag(tp, HW_TSO_1) ||
3526             tg3_flag(tp, HW_TSO_2) ||
3527             tg3_flag(tp, HW_TSO_3))
3528                 return 0;
3529
3530         fw_data = (void *)tp->fw->data;
3531
3532         /* Firmware blob starts with version numbers, followed by the
3533          * start address and length.  We set the complete length:
3534          * length = end_address_of_bss - start_address_of_text.
3535          * The remainder is the blob, loaded contiguously from the
3536          * start address. */
3537
3538         info.fw_base = be32_to_cpu(fw_data[1]);
3539         cpu_scratch_size = tp->fw_len;
3540         info.fw_len = tp->fw->size - 12;
3541         info.fw_data = &fw_data[3];
3542
3543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3544                 cpu_base = RX_CPU_BASE;
3545                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3546         } else {
3547                 cpu_base = TX_CPU_BASE;
3548                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3549                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3550         }
3551
3552         err = tg3_load_firmware_cpu(tp, cpu_base,
3553                                     cpu_scratch_base, cpu_scratch_size,
3554                                     &info);
3555         if (err)
3556                 return err;
3557
3558         /* Now startup the cpu. */
3559         tw32(cpu_base + CPU_STATE, 0xffffffff);
3560         tw32_f(cpu_base + CPU_PC, info.fw_base);
3561
3562         for (i = 0; i < 5; i++) {
3563                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3564                         break;
3565                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3566                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3567                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3568                 udelay(1000);
3569         }
3570         if (i >= 5) {
3571                 netdev_err(tp->dev,
3572                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3573                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3574                 return -ENODEV;
3575         }
3576         tw32(cpu_base + CPU_STATE, 0xffffffff);
3577         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3578         return 0;
3579 }
3580
3581
3582 /* tp->lock is held. */
3583 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3584 {
3585         u32 addr_high, addr_low;
3586         int i;
3587
3588         addr_high = ((tp->dev->dev_addr[0] << 8) |
3589                      tp->dev->dev_addr[1]);
3590         addr_low = ((tp->dev->dev_addr[2] << 24) |
3591                     (tp->dev->dev_addr[3] << 16) |
3592                     (tp->dev->dev_addr[4] <<  8) |
3593                     (tp->dev->dev_addr[5] <<  0));
3594         for (i = 0; i < 4; i++) {
3595                 if (i == 1 && skip_mac_1)
3596                         continue;
3597                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3598                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3599         }
3600
3601         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3602             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3603                 for (i = 0; i < 12; i++) {
3604                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3605                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3606                 }
3607         }
3608
3609         addr_high = (tp->dev->dev_addr[0] +
3610                      tp->dev->dev_addr[1] +
3611                      tp->dev->dev_addr[2] +
3612                      tp->dev->dev_addr[3] +
3613                      tp->dev->dev_addr[4] +
3614                      tp->dev->dev_addr[5]) &
3615                 TX_BACKOFF_SEED_MASK;
3616         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3617 }
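/* Worked example (illustrative MAC address): for 00:10:18:aa:bb:cc the
 * registers above are packed as
 *
 *	addr_high = 0x00000010	(bytes 0-1)
 *	addr_low  = 0x18aabbcc	(bytes 2-5)
 *
 * and the backoff seed is (0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc)
 * masked with TX_BACKOFF_SEED_MASK.
 */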
3618
3619 static void tg3_enable_register_access(struct tg3 *tp)
3620 {
3621         /*
3622          * Make sure register accesses (indirect or otherwise) will function
3623          * correctly.
3624          */
3625         pci_write_config_dword(tp->pdev,
3626                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3627 }
3628
3629 static int tg3_power_up(struct tg3 *tp)
3630 {
3631         int err;
3632
3633         tg3_enable_register_access(tp);
3634
3635         err = pci_set_power_state(tp->pdev, PCI_D0);
3636         if (!err) {
3637                 /* Switch out of Vaux if it is a NIC */
3638                 tg3_pwrsrc_switch_to_vmain(tp);
3639         } else {
3640                 netdev_err(tp->dev, "Transition to D0 failed\n");
3641         }
3642
3643         return err;
3644 }
3645
3646 static int tg3_setup_phy(struct tg3 *, int);
3647
3648 static int tg3_power_down_prepare(struct tg3 *tp)
3649 {
3650         u32 misc_host_ctrl;
3651         bool device_should_wake, do_low_power;
3652
3653         tg3_enable_register_access(tp);
3654
3655         /* Restore the CLKREQ setting. */
3656         if (tg3_flag(tp, CLKREQ_BUG))
3657                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3658                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3659
3660         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3661         tw32(TG3PCI_MISC_HOST_CTRL,
3662              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3663
3664         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3665                              tg3_flag(tp, WOL_ENABLE);
3666
3667         if (tg3_flag(tp, USE_PHYLIB)) {
3668                 do_low_power = false;
3669                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3670                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3671                         struct phy_device *phydev;
3672                         u32 phyid, advertising;
3673
3674                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3675
3676                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3677
3678                         tp->link_config.speed = phydev->speed;
3679                         tp->link_config.duplex = phydev->duplex;
3680                         tp->link_config.autoneg = phydev->autoneg;
3681                         tp->link_config.advertising = phydev->advertising;
3682
3683                         advertising = ADVERTISED_TP |
3684                                       ADVERTISED_Pause |
3685                                       ADVERTISED_Autoneg |
3686                                       ADVERTISED_10baseT_Half;
3687
3688                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3689                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3690                                         advertising |=
3691                                                 ADVERTISED_100baseT_Half |
3692                                                 ADVERTISED_100baseT_Full |
3693                                                 ADVERTISED_10baseT_Full;
3694                                 else
3695                                         advertising |= ADVERTISED_10baseT_Full;
3696                         }
3697
3698                         phydev->advertising = advertising;
3699
3700                         phy_start_aneg(phydev);
3701
3702                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3703                         if (phyid != PHY_ID_BCMAC131) {
3704                                 phyid &= PHY_BCM_OUI_MASK;
3705                                 if (phyid == PHY_BCM_OUI_1 ||
3706                                     phyid == PHY_BCM_OUI_2 ||
3707                                     phyid == PHY_BCM_OUI_3)
3708                                         do_low_power = true;
3709                         }
3710                 }
3711         } else {
3712                 do_low_power = true;
3713
3714                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3715                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3716
3717                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3718                         tg3_setup_phy(tp, 0);
3719         }
3720
3721         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3722                 u32 val;
3723
3724                 val = tr32(GRC_VCPU_EXT_CTRL);
3725                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3726         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3727                 int i;
3728                 u32 val;
3729
3730                 for (i = 0; i < 200; i++) {
3731                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3732                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3733                                 break;
3734                         msleep(1);
3735                 }
3736         }
3737         if (tg3_flag(tp, WOL_CAP))
3738                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3739                                                      WOL_DRV_STATE_SHUTDOWN |
3740                                                      WOL_DRV_WOL |
3741                                                      WOL_SET_MAGIC_PKT);
3742
3743         if (device_should_wake) {
3744                 u32 mac_mode;
3745
3746                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3747                         if (do_low_power &&
3748                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3749                                 tg3_phy_auxctl_write(tp,
3750                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3751                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3752                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3753                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3754                                 udelay(40);
3755                         }
3756
3757                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3758                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3759                         else
3760                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3761
3762                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3763                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3764                             ASIC_REV_5700) {
3765                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3766                                              SPEED_100 : SPEED_10;
3767                                 if (tg3_5700_link_polarity(tp, speed))
3768                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3769                                 else
3770                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3771                         }
3772                 } else {
3773                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3774                 }
3775
3776                 if (!tg3_flag(tp, 5750_PLUS))
3777                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3778
3779                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3780                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3781                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3782                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3783
3784                 if (tg3_flag(tp, ENABLE_APE))
3785                         mac_mode |= MAC_MODE_APE_TX_EN |
3786                                     MAC_MODE_APE_RX_EN |
3787                                     MAC_MODE_TDE_ENABLE;
3788
3789                 tw32_f(MAC_MODE, mac_mode);
3790                 udelay(100);
3791
3792                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3793                 udelay(10);
3794         }
3795
3796         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3797             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3798              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3799                 u32 base_val;
3800
3801                 base_val = tp->pci_clock_ctrl;
3802                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3803                              CLOCK_CTRL_TXCLK_DISABLE);
3804
3805                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3806                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3807         } else if (tg3_flag(tp, 5780_CLASS) ||
3808                    tg3_flag(tp, CPMU_PRESENT) ||
3809                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3810                 /* do nothing */
3811         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3812                 u32 newbits1, newbits2;
3813
3814                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3815                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3816                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3817                                     CLOCK_CTRL_TXCLK_DISABLE |
3818                                     CLOCK_CTRL_ALTCLK);
3819                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3820                 } else if (tg3_flag(tp, 5705_PLUS)) {
3821                         newbits1 = CLOCK_CTRL_625_CORE;
3822                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3823                 } else {
3824                         newbits1 = CLOCK_CTRL_ALTCLK;
3825                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3826                 }
3827
3828                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3829                             40);
3830
3831                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3832                             40);
3833
3834                 if (!tg3_flag(tp, 5705_PLUS)) {
3835                         u32 newbits3;
3836
3837                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3838                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3839                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3840                                             CLOCK_CTRL_TXCLK_DISABLE |
3841                                             CLOCK_CTRL_44MHZ_CORE);
3842                         } else {
3843                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3844                         }
3845
3846                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3847                                     tp->pci_clock_ctrl | newbits3, 40);
3848                 }
3849         }
3850
3851         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3852                 tg3_power_down_phy(tp, do_low_power);
3853
3854         tg3_frob_aux_power(tp, true);
3855
3856         /* Workaround for unstable PLL clock */
3857         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3858             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3859                 u32 val = tr32(0x7d00);
3860
3861                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3862                 tw32(0x7d00, val);
3863                 if (!tg3_flag(tp, ENABLE_ASF)) {
3864                         int err;
3865
3866                         err = tg3_nvram_lock(tp);
3867                         tg3_halt_cpu(tp, RX_CPU_BASE);
3868                         if (!err)
3869                                 tg3_nvram_unlock(tp);
3870                 }
3871         }
3872
3873         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3874
3875         return 0;
3876 }
3877
3878 static void tg3_power_down(struct tg3 *tp)
3879 {
3880         tg3_power_down_prepare(tp);
3881
3882         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3883         pci_set_power_state(tp->pdev, PCI_D3hot);
3884 }
3885
3886 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3887 {
3888         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3889         case MII_TG3_AUX_STAT_10HALF:
3890                 *speed = SPEED_10;
3891                 *duplex = DUPLEX_HALF;
3892                 break;
3893
3894         case MII_TG3_AUX_STAT_10FULL:
3895                 *speed = SPEED_10;
3896                 *duplex = DUPLEX_FULL;
3897                 break;
3898
3899         case MII_TG3_AUX_STAT_100HALF:
3900                 *speed = SPEED_100;
3901                 *duplex = DUPLEX_HALF;
3902                 break;
3903
3904         case MII_TG3_AUX_STAT_100FULL:
3905                 *speed = SPEED_100;
3906                 *duplex = DUPLEX_FULL;
3907                 break;
3908
3909         case MII_TG3_AUX_STAT_1000HALF:
3910                 *speed = SPEED_1000;
3911                 *duplex = DUPLEX_HALF;
3912                 break;
3913
3914         case MII_TG3_AUX_STAT_1000FULL:
3915                 *speed = SPEED_1000;
3916                 *duplex = DUPLEX_FULL;
3917                 break;
3918
3919         default:
3920                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3921                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3922                                  SPEED_10;
3923                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3924                                   DUPLEX_HALF;
3925                         break;
3926                 }
3927                 *speed = SPEED_UNKNOWN;
3928                 *duplex = DUPLEX_UNKNOWN;
3929                 break;
3930         }
3931 }
3932
3933 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3934 {
3935         int err = 0;
3936         u32 val, new_adv;
3937
3938         new_adv = ADVERTISE_CSMA;
3939         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3940         new_adv |= mii_advertise_flowctrl(flowctrl);
3941
3942         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3943         if (err)
3944                 goto done;
3945
3946         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3947                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3948
3949                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3950                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3951                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3952
3953                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3954                 if (err)
3955                         goto done;
3956         }
3957
3958         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3959                 goto done;
3960
3961         tw32(TG3_CPMU_EEE_MODE,
3962              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3963
3964         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3965         if (!err) {
3966                 u32 err2;
3967
3968                 val = 0;
3969                 /* Advertise 100-BaseTX EEE ability */
3970                 if (advertise & ADVERTISED_100baseT_Full)
3971                         val |= MDIO_AN_EEE_ADV_100TX;
3972                 /* Advertise 1000-BaseT EEE ability */
3973                 if (advertise & ADVERTISED_1000baseT_Full)
3974                         val |= MDIO_AN_EEE_ADV_1000T;
3975                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3976                 if (err)
3977                         val = 0;
3978
3979                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3980                 case ASIC_REV_5717:
3981                 case ASIC_REV_57765:
3982                 case ASIC_REV_57766:
3983                 case ASIC_REV_5719:
3984                         /* If we advertised any EEE abilities above... */
3985                         if (val)
3986                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3987                                       MII_TG3_DSP_TAP26_RMRXSTO |
3988                                       MII_TG3_DSP_TAP26_OPCSINPT;
3989                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3990                         /* Fall through */
3991                 case ASIC_REV_5720:
3992                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3993                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3994                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3995                 }
3996
3997                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3998                 if (!err)
3999                         err = err2;
4000         }
4001
4002 done:
4003         return err;
4004 }
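/* Illustration of the ethtool->MII translation used above: e.g. an
 * 'advertise' of ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Half
 * becomes (ADVERTISE_100FULL | ADVERTISE_10HALF) in MII_ADVERTISE,
 * plus ADVERTISE_CSMA and the flow-control bits derived from
 * 'flowctrl' by mii_advertise_flowctrl().
 */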
4005
4006 static void tg3_phy_copper_begin(struct tg3 *tp)
4007 {
4008         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4009             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4010                 u32 adv, fc;
4011
4012                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4013                         adv = ADVERTISED_10baseT_Half |
4014                               ADVERTISED_10baseT_Full;
4015                         if (tg3_flag(tp, WOL_SPEED_100MB))
4016                                 adv |= ADVERTISED_100baseT_Half |
4017                                        ADVERTISED_100baseT_Full;
4018
4019                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4020                 } else {
4021                         adv = tp->link_config.advertising;
4022                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4023                                 adv &= ~(ADVERTISED_1000baseT_Half |
4024                                          ADVERTISED_1000baseT_Full);
4025
4026                         fc = tp->link_config.flowctrl;
4027                 }
4028
4029                 tg3_phy_autoneg_cfg(tp, adv, fc);
4030
4031                 tg3_writephy(tp, MII_BMCR,
4032                              BMCR_ANENABLE | BMCR_ANRESTART);
4033         } else {
4034                 int i;
4035                 u32 bmcr, orig_bmcr;
4036
4037                 tp->link_config.active_speed = tp->link_config.speed;
4038                 tp->link_config.active_duplex = tp->link_config.duplex;
4039
4040                 bmcr = 0;
4041                 switch (tp->link_config.speed) {
4042                 default:
4043                 case SPEED_10:
4044                         break;
4045
4046                 case SPEED_100:
4047                         bmcr |= BMCR_SPEED100;
4048                         break;
4049
4050                 case SPEED_1000:
4051                         bmcr |= BMCR_SPEED1000;
4052                         break;
4053                 }
4054
4055                 if (tp->link_config.duplex == DUPLEX_FULL)
4056                         bmcr |= BMCR_FULLDPLX;
4057
4058                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4059                     (bmcr != orig_bmcr)) {
4060                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4061                         for (i = 0; i < 1500; i++) {
4062                                 u32 tmp;
4063
4064                                 udelay(10);
4065                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4066                                     tg3_readphy(tp, MII_BMSR, &tmp))
4067                                         continue;
4068                                 if (!(tmp & BMSR_LSTATUS)) {
4069                                         udelay(40);
4070                                         break;
4071                                 }
4072                         }
4073                         tg3_writephy(tp, MII_BMCR, bmcr);
4074                         udelay(40);
4075                 }
4076         }
4077 }
4078
4079 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4080 {
4081         int err;
4082
4083         /* Turn off tap power management. */
4084         /* Set Extended packet length bit */
4085         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4086
4087         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4088         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4089         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4090         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4091         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4092
4093         udelay(40);
4094
4095         return err;
4096 }
4097
4098 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4099 {
4100         u32 advmsk, tgtadv, advertising;
4101
4102         advertising = tp->link_config.advertising;
4103         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4104
4105         advmsk = ADVERTISE_ALL;
4106         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4107                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4108                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4109         }
4110
4111         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4112                 return false;
4113
4114         if ((*lcladv & advmsk) != tgtadv)
4115                 return false;
4116
4117         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4118                 u32 tg3_ctrl;
4119
4120                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4121
4122                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4123                         return false;
4124
4125                 if (tgtadv &&
4126                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4127                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4128                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4129                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4130                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4131                 } else {
4132                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4133                 }
4134
4135                 if (tg3_ctrl != tgtadv)
4136                         return false;
4137         }
4138
4139         return true;
4140 }
4141
4142 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4143 {
4144         u32 lpeth = 0;
4145
4146         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4147                 u32 val;
4148
4149                 if (tg3_readphy(tp, MII_STAT1000, &val))
4150                         return false;
4151
4152                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4153         }
4154
4155         if (tg3_readphy(tp, MII_LPA, rmtadv))
4156                 return false;
4157
4158         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4159         tp->link_config.rmt_adv = lpeth;
4160
4161         return true;
4162 }
4163
4164 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4165 {
4166         int current_link_up;
4167         u32 bmsr, val;
4168         u32 lcl_adv, rmt_adv;
4169         u16 current_speed;
4170         u8 current_duplex;
4171         int i, err;
4172
4173         tw32(MAC_EVENT, 0);
4174
4175         tw32_f(MAC_STATUS,
4176              (MAC_STATUS_SYNC_CHANGED |
4177               MAC_STATUS_CFG_CHANGED |
4178               MAC_STATUS_MI_COMPLETION |
4179               MAC_STATUS_LNKSTATE_CHANGED));
4180         udelay(40);
4181
4182         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4183                 tw32_f(MAC_MI_MODE,
4184                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4185                 udelay(80);
4186         }
4187
4188         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4189
4190         /* Some third-party PHYs need to be reset on link going
4191          * down.
4192          */
4193         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4194              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4195              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4196             netif_carrier_ok(tp->dev)) {
4197                 tg3_readphy(tp, MII_BMSR, &bmsr);
4198                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4199                     !(bmsr & BMSR_LSTATUS))
4200                         force_reset = 1;
4201         }
4202         if (force_reset)
4203                 tg3_phy_reset(tp);
4204
4205         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4206                 tg3_readphy(tp, MII_BMSR, &bmsr);
4207                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4208                     !tg3_flag(tp, INIT_COMPLETE))
4209                         bmsr = 0;
4210
4211                 if (!(bmsr & BMSR_LSTATUS)) {
4212                         err = tg3_init_5401phy_dsp(tp);
4213                         if (err)
4214                                 return err;
4215
4216                         tg3_readphy(tp, MII_BMSR, &bmsr);
4217                         for (i = 0; i < 1000; i++) {
4218                                 udelay(10);
4219                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4220                                     (bmsr & BMSR_LSTATUS)) {
4221                                         udelay(40);
4222                                         break;
4223                                 }
4224                         }
4225
4226                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4227                             TG3_PHY_REV_BCM5401_B0 &&
4228                             !(bmsr & BMSR_LSTATUS) &&
4229                             tp->link_config.active_speed == SPEED_1000) {
4230                                 err = tg3_phy_reset(tp);
4231                                 if (!err)
4232                                         err = tg3_init_5401phy_dsp(tp);
4233                                 if (err)
4234                                         return err;
4235                         }
4236                 }
4237         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4238                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4239                 /* 5701 {A0,B0} CRC bug workaround */
4240                 tg3_writephy(tp, 0x15, 0x0a75);
4241                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4242                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4243                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4244         }
4245
4246         /* Clear pending interrupts... */
4247         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4248         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4249
4250         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4251                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4252         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4253                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4254
4255         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4256             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4257                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4258                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4259                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4260                 else
4261                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4262         }
4263
4264         current_link_up = 0;
4265         current_speed = SPEED_UNKNOWN;
4266         current_duplex = DUPLEX_UNKNOWN;
4267         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4268         tp->link_config.rmt_adv = 0;
4269
4270         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4271                 err = tg3_phy_auxctl_read(tp,
4272                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4273                                           &val);
4274                 if (!err && !(val & (1 << 10))) {
4275                         tg3_phy_auxctl_write(tp,
4276                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4277                                              val | (1 << 10));
4278                         goto relink;
4279                 }
4280         }
4281
4282         bmsr = 0;
4283         for (i = 0; i < 100; i++) {
4284                 tg3_readphy(tp, MII_BMSR, &bmsr);
4285                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4286                     (bmsr & BMSR_LSTATUS))
4287                         break;
4288                 udelay(40);
4289         }
4290
4291         if (bmsr & BMSR_LSTATUS) {
4292                 u32 aux_stat, bmcr;
4293
4294                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4295                 for (i = 0; i < 2000; i++) {
4296                         udelay(10);
4297                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4298                             aux_stat)
4299                                 break;
4300                 }
4301
4302                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4303                                              &current_speed,
4304                                              &current_duplex);
4305
4306                 bmcr = 0;
4307                 for (i = 0; i < 200; i++) {
4308                         tg3_readphy(tp, MII_BMCR, &bmcr);
4309                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4310                                 continue;
4311                         if (bmcr && bmcr != 0x7fff)
4312                                 break;
4313                         udelay(10);
4314                 }
4315
4316                 lcl_adv = 0;
4317                 rmt_adv = 0;
4318
4319                 tp->link_config.active_speed = current_speed;
4320                 tp->link_config.active_duplex = current_duplex;
4321
4322                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4323                         if ((bmcr & BMCR_ANENABLE) &&
4324                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4325                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4326                                 current_link_up = 1;
4327                 } else {
4328                         if (!(bmcr & BMCR_ANENABLE) &&
4329                             tp->link_config.speed == current_speed &&
4330                             tp->link_config.duplex == current_duplex &&
4331                             tp->link_config.flowctrl ==
4332                             tp->link_config.active_flowctrl) {
4333                                 current_link_up = 1;
4334                         }
4335                 }
4336
4337                 if (current_link_up == 1 &&
4338                     tp->link_config.active_duplex == DUPLEX_FULL) {
4339                         u32 reg, bit;
4340
4341                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4342                                 reg = MII_TG3_FET_GEN_STAT;
4343                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4344                         } else {
4345                                 reg = MII_TG3_EXT_STAT;
4346                                 bit = MII_TG3_EXT_STAT_MDIX;
4347                         }
4348
4349                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4350                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4351
4352                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4353                 }
4354         }
4355
4356 relink:
4357         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4358                 tg3_phy_copper_begin(tp);
4359
4360                 tg3_readphy(tp, MII_BMSR, &bmsr);
4361                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4362                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4363                         current_link_up = 1;
4364         }
4365
4366         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4367         if (current_link_up == 1) {
4368                 if (tp->link_config.active_speed == SPEED_100 ||
4369                     tp->link_config.active_speed == SPEED_10)
4370                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4371                 else
4372                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4373         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4374                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4375         else
4376                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4377
4378         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4379         if (tp->link_config.active_duplex == DUPLEX_HALF)
4380                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4381
4382         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4383                 if (current_link_up == 1 &&
4384                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4385                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4386                 else
4387                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4388         }
4389
4390         /* ??? Without this setting Netgear GA302T PHY does not
4391          * ??? send/receive packets...
4392          */
4393         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4394             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4395                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4396                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4397                 udelay(80);
4398         }
4399
4400         tw32_f(MAC_MODE, tp->mac_mode);
4401         udelay(40);
4402
4403         tg3_phy_eee_adjust(tp, current_link_up);
4404
4405         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4406                 /* Polled via timer. */
4407                 tw32_f(MAC_EVENT, 0);
4408         } else {
4409                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4410         }
4411         udelay(40);
4412
4413         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4414             current_link_up == 1 &&
4415             tp->link_config.active_speed == SPEED_1000 &&
4416             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4417                 udelay(120);
4418                 tw32_f(MAC_STATUS,
4419                      (MAC_STATUS_SYNC_CHANGED |
4420                       MAC_STATUS_CFG_CHANGED));
4421                 udelay(40);
4422                 tg3_write_mem(tp,
4423                               NIC_SRAM_FIRMWARE_MBOX,
4424                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4425         }
4426
4427         /* Prevent send BD corruption: disable CLKREQ at 10 and 100 Mb/s. */
4428         if (tg3_flag(tp, CLKREQ_BUG)) {
4429                 if (tp->link_config.active_speed == SPEED_100 ||
4430                     tp->link_config.active_speed == SPEED_10)
4431                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4432                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4433                 else
4434                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4435                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4436         }
4437
4438         if (current_link_up != netif_carrier_ok(tp->dev)) {
4439                 if (current_link_up)
4440                         netif_carrier_on(tp->dev);
4441                 else
4442                         netif_carrier_off(tp->dev);
4443                 tg3_link_report(tp);
4444         }
4445
4446         return 0;
4447 }
4448
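/* Software state machine for 1000BASE-X auto-negotiation (IEEE 802.3
 * clause 37), used when the MAC cannot perform autoneg in hardware.
 */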
4449 struct tg3_fiber_aneginfo {
4450         int state;
4451 #define ANEG_STATE_UNKNOWN              0
4452 #define ANEG_STATE_AN_ENABLE            1
4453 #define ANEG_STATE_RESTART_INIT         2
4454 #define ANEG_STATE_RESTART              3
4455 #define ANEG_STATE_DISABLE_LINK_OK      4
4456 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4457 #define ANEG_STATE_ABILITY_DETECT       6
4458 #define ANEG_STATE_ACK_DETECT_INIT      7
4459 #define ANEG_STATE_ACK_DETECT           8
4460 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4461 #define ANEG_STATE_COMPLETE_ACK         10
4462 #define ANEG_STATE_IDLE_DETECT_INIT     11
4463 #define ANEG_STATE_IDLE_DETECT          12
4464 #define ANEG_STATE_LINK_OK              13
4465 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4466 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4467
4468         u32 flags;
4469 #define MR_AN_ENABLE            0x00000001
4470 #define MR_RESTART_AN           0x00000002
4471 #define MR_AN_COMPLETE          0x00000004
4472 #define MR_PAGE_RX              0x00000008
4473 #define MR_NP_LOADED            0x00000010
4474 #define MR_TOGGLE_TX            0x00000020
4475 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4476 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4477 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4478 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4479 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4480 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4481 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4482 #define MR_TOGGLE_RX            0x00002000
4483 #define MR_NP_RX                0x00004000
4484
4485 #define MR_LINK_OK              0x80000000
4486
4487         unsigned long link_time, cur_time;
4488
4489         u32 ability_match_cfg;
4490         int ability_match_count;
4491
4492         char ability_match, idle_match, ack_match;
4493
4494         u32 txconfig, rxconfig;
4495 #define ANEG_CFG_NP             0x00000080
4496 #define ANEG_CFG_ACK            0x00000040
4497 #define ANEG_CFG_RF2            0x00000020
4498 #define ANEG_CFG_RF1            0x00000010
4499 #define ANEG_CFG_PS2            0x00000001
4500 #define ANEG_CFG_PS1            0x00008000
4501 #define ANEG_CFG_HD             0x00004000
4502 #define ANEG_CFG_FD             0x00002000
4503 #define ANEG_CFG_INVAL          0x00001f06
4504
4505 };
4506 #define ANEG_OK         0
4507 #define ANEG_DONE       1
4508 #define ANEG_TIMER_ENAB 2
4509 #define ANEG_FAILED     -1
4510
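/* Settle time in state machine ticks; fiber_autoneg() advances one tick
 * per microsecond of polling, so this is roughly 10 ms.
 */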
4511 #define ANEG_STATE_SETTLE_TIME  10000
4512
4513 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4514                                    struct tg3_fiber_aneginfo *ap)
4515 {
4516         u16 flowctrl;
4517         unsigned long delta;
4518         u32 rx_cfg_reg;
4519         int ret;
4520
4521         if (ap->state == ANEG_STATE_UNKNOWN) {
4522                 ap->rxconfig = 0;
4523                 ap->link_time = 0;
4524                 ap->cur_time = 0;
4525                 ap->ability_match_cfg = 0;
4526                 ap->ability_match_count = 0;
4527                 ap->ability_match = 0;
4528                 ap->idle_match = 0;
4529                 ap->ack_match = 0;
4530         }
4531         ap->cur_time++;
4532
4533         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4534                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4535
4536                 if (rx_cfg_reg != ap->ability_match_cfg) {
4537                         ap->ability_match_cfg = rx_cfg_reg;
4538                         ap->ability_match = 0;
4539                         ap->ability_match_count = 0;
4540                 } else {
4541                         if (++ap->ability_match_count > 1) {
4542                                 ap->ability_match = 1;
4543                                 ap->ability_match_cfg = rx_cfg_reg;
4544                         }
4545                 }
4546                 if (rx_cfg_reg & ANEG_CFG_ACK)
4547                         ap->ack_match = 1;
4548                 else
4549                         ap->ack_match = 0;
4550
4551                 ap->idle_match = 0;
4552         } else {
4553                 ap->idle_match = 1;
4554                 ap->ability_match_cfg = 0;
4555                 ap->ability_match_count = 0;
4556                 ap->ability_match = 0;
4557                 ap->ack_match = 0;
4558
4559                 rx_cfg_reg = 0;
4560         }
4561
4562         ap->rxconfig = rx_cfg_reg;
4563         ret = ANEG_OK;
4564
4565         switch (ap->state) {
4566         case ANEG_STATE_UNKNOWN:
4567                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4568                         ap->state = ANEG_STATE_AN_ENABLE;
4569
4570                 /* fallthru */
4571         case ANEG_STATE_AN_ENABLE:
4572                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4573                 if (ap->flags & MR_AN_ENABLE) {
4574                         ap->link_time = 0;
4575                         ap->cur_time = 0;
4576                         ap->ability_match_cfg = 0;
4577                         ap->ability_match_count = 0;
4578                         ap->ability_match = 0;
4579                         ap->idle_match = 0;
4580                         ap->ack_match = 0;
4581
4582                         ap->state = ANEG_STATE_RESTART_INIT;
4583                 } else {
4584                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4585                 }
4586                 break;
4587
4588         case ANEG_STATE_RESTART_INIT:
4589                 ap->link_time = ap->cur_time;
4590                 ap->flags &= ~(MR_NP_LOADED);
4591                 ap->txconfig = 0;
4592                 tw32(MAC_TX_AUTO_NEG, 0);
4593                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4594                 tw32_f(MAC_MODE, tp->mac_mode);
4595                 udelay(40);
4596
4597                 ret = ANEG_TIMER_ENAB;
4598                 ap->state = ANEG_STATE_RESTART;
4599
4600                 /* fallthru */
4601         case ANEG_STATE_RESTART:
4602                 delta = ap->cur_time - ap->link_time;
4603                 if (delta > ANEG_STATE_SETTLE_TIME)
4604                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4605                 else
4606                         ret = ANEG_TIMER_ENAB;
4607                 break;
4608
4609         case ANEG_STATE_DISABLE_LINK_OK:
4610                 ret = ANEG_DONE;
4611                 break;
4612
4613         case ANEG_STATE_ABILITY_DETECT_INIT:
4614                 ap->flags &= ~(MR_TOGGLE_TX);
4615                 ap->txconfig = ANEG_CFG_FD;
4616                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4617                 if (flowctrl & ADVERTISE_1000XPAUSE)
4618                         ap->txconfig |= ANEG_CFG_PS1;
4619                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4620                         ap->txconfig |= ANEG_CFG_PS2;
4621                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4622                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4623                 tw32_f(MAC_MODE, tp->mac_mode);
4624                 udelay(40);
4625
4626                 ap->state = ANEG_STATE_ABILITY_DETECT;
4627                 break;
4628
4629         case ANEG_STATE_ABILITY_DETECT:
4630                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4631                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4632                 break;
4633
4634         case ANEG_STATE_ACK_DETECT_INIT:
4635                 ap->txconfig |= ANEG_CFG_ACK;
4636                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4637                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4638                 tw32_f(MAC_MODE, tp->mac_mode);
4639                 udelay(40);
4640
4641                 ap->state = ANEG_STATE_ACK_DETECT;
4642
4643                 /* fallthru */
4644         case ANEG_STATE_ACK_DETECT:
4645                 if (ap->ack_match != 0) {
4646                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4647                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4648                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4649                         } else {
4650                                 ap->state = ANEG_STATE_AN_ENABLE;
4651                         }
4652                 } else if (ap->ability_match != 0 &&
4653                            ap->rxconfig == 0) {
4654                         ap->state = ANEG_STATE_AN_ENABLE;
4655                 }
4656                 break;
4657
4658         case ANEG_STATE_COMPLETE_ACK_INIT:
4659                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4660                         ret = ANEG_FAILED;
4661                         break;
4662                 }
4663                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4664                                MR_LP_ADV_HALF_DUPLEX |
4665                                MR_LP_ADV_SYM_PAUSE |
4666                                MR_LP_ADV_ASYM_PAUSE |
4667                                MR_LP_ADV_REMOTE_FAULT1 |
4668                                MR_LP_ADV_REMOTE_FAULT2 |
4669                                MR_LP_ADV_NEXT_PAGE |
4670                                MR_TOGGLE_RX |
4671                                MR_NP_RX);
4672                 if (ap->rxconfig & ANEG_CFG_FD)
4673                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4674                 if (ap->rxconfig & ANEG_CFG_HD)
4675                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4676                 if (ap->rxconfig & ANEG_CFG_PS1)
4677                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4678                 if (ap->rxconfig & ANEG_CFG_PS2)
4679                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4680                 if (ap->rxconfig & ANEG_CFG_RF1)
4681                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4682                 if (ap->rxconfig & ANEG_CFG_RF2)
4683                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4684                 if (ap->rxconfig & ANEG_CFG_NP)
4685                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4686
4687                 ap->link_time = ap->cur_time;
4688
4689                 ap->flags ^= (MR_TOGGLE_TX);
4690                 if (ap->rxconfig & 0x0008)
4691                         ap->flags |= MR_TOGGLE_RX;
4692                 if (ap->rxconfig & ANEG_CFG_NP)
4693                         ap->flags |= MR_NP_RX;
4694                 ap->flags |= MR_PAGE_RX;
4695
4696                 ap->state = ANEG_STATE_COMPLETE_ACK;
4697                 ret = ANEG_TIMER_ENAB;
4698                 break;
4699
4700         case ANEG_STATE_COMPLETE_ACK:
4701                 if (ap->ability_match != 0 &&
4702                     ap->rxconfig == 0) {
4703                         ap->state = ANEG_STATE_AN_ENABLE;
4704                         break;
4705                 }
4706                 delta = ap->cur_time - ap->link_time;
4707                 if (delta > ANEG_STATE_SETTLE_TIME) {
4708                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4709                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4710                         } else {
4711                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4712                                     !(ap->flags & MR_NP_RX)) {
4713                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4714                                 } else {
4715                                         ret = ANEG_FAILED;
4716                                 }
4717                         }
4718                 }
4719                 break;
4720
4721         case ANEG_STATE_IDLE_DETECT_INIT:
4722                 ap->link_time = ap->cur_time;
4723                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4724                 tw32_f(MAC_MODE, tp->mac_mode);
4725                 udelay(40);
4726
4727                 ap->state = ANEG_STATE_IDLE_DETECT;
4728                 ret = ANEG_TIMER_ENAB;
4729                 break;
4730
4731         case ANEG_STATE_IDLE_DETECT:
4732                 if (ap->ability_match != 0 &&
4733                     ap->rxconfig == 0) {
4734                         ap->state = ANEG_STATE_AN_ENABLE;
4735                         break;
4736                 }
4737                 delta = ap->cur_time - ap->link_time;
4738                 if (delta > ANEG_STATE_SETTLE_TIME) {
4739                         /* XXX another gem from the Broadcom driver :( */
4740                         ap->state = ANEG_STATE_LINK_OK;
4741                 }
4742                 break;
4743
4744         case ANEG_STATE_LINK_OK:
4745                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4746                 ret = ANEG_DONE;
4747                 break;
4748
4749         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4750                 /* ??? unimplemented */
4751                 break;
4752
4753         case ANEG_STATE_NEXT_PAGE_WAIT:
4754                 /* ??? unimplemented */
4755                 break;
4756
4757         default:
4758                 ret = ANEG_FAILED;
4759                 break;
4760         }
4761
4762         return ret;
4763 }
4764
4765 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4766 {
4767         int res = 0;
4768         struct tg3_fiber_aneginfo aninfo;
4769         int status = ANEG_FAILED;
4770         unsigned int tick;
4771         u32 tmp;
4772
4773         tw32_f(MAC_TX_AUTO_NEG, 0);
4774
4775         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4776         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4777         udelay(40);
4778
4779         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4780         udelay(40);
4781
4782         memset(&aninfo, 0, sizeof(aninfo));
4783         aninfo.flags |= MR_AN_ENABLE;
4784         aninfo.state = ANEG_STATE_UNKNOWN;
4785         aninfo.cur_time = 0;
4786         tick = 0;
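        /* Run the autoneg state machine about once per microsecond, for
         * a total timeout of roughly 195 ms.
         */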
4787         while (++tick < 195000) {
4788                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4789                 if (status == ANEG_DONE || status == ANEG_FAILED)
4790                         break;
4791
4792                 udelay(1);
4793         }
4794
4795         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4796         tw32_f(MAC_MODE, tp->mac_mode);
4797         udelay(40);
4798
4799         *txflags = aninfo.txconfig;
4800         *rxflags = aninfo.flags;
4801
4802         if (status == ANEG_DONE &&
4803             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4804                              MR_LP_ADV_FULL_DUPLEX)))
4805                 res = 1;
4806
4807         return res;
4808 }
4809
4810 static void tg3_init_bcm8002(struct tg3 *tp)
4811 {
4812         u32 mac_status = tr32(MAC_STATUS);
4813         int i;
4814
4815         /* Reset when initializing for the first time or when we have a link. */
4816         if (tg3_flag(tp, INIT_COMPLETE) &&
4817             !(mac_status & MAC_STATUS_PCS_SYNCED))
4818                 return;
4819
4820         /* Set PLL lock range. */
4821         tg3_writephy(tp, 0x16, 0x8007);
4822
4823         /* SW reset */
4824         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4825
4826         /* Wait for reset to complete. */
4827         /* XXX schedule_timeout() ... */
4828         for (i = 0; i < 500; i++)
4829                 udelay(10);
4830
4831         /* Config mode; select PMA/Ch 1 regs. */
4832         tg3_writephy(tp, 0x10, 0x8411);
4833
4834         /* Enable auto-lock and comdet, select txclk for tx. */
4835         tg3_writephy(tp, 0x11, 0x0a10);
4836
4837         tg3_writephy(tp, 0x18, 0x00a0);
4838         tg3_writephy(tp, 0x16, 0x41ff);
4839
4840         /* Assert and deassert POR. */
4841         tg3_writephy(tp, 0x13, 0x0400);
4842         udelay(40);
4843         tg3_writephy(tp, 0x13, 0x0000);
4844
4845         tg3_writephy(tp, 0x11, 0x0a50);
4846         udelay(40);
4847         tg3_writephy(tp, 0x11, 0x0a10);
4848
4849         /* Wait for signal to stabilize */
4850         /* XXX schedule_timeout() ... */
4851         for (i = 0; i < 15000; i++)
4852                 udelay(10);
4853
4854         /* Deselect the channel register so we can read the PHYID
4855          * later.
4856          */
4857         tg3_writephy(tp, 0x10, 0x8011);
4858 }
4859
4860 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4861 {
4862         u16 flowctrl;
4863         u32 sg_dig_ctrl, sg_dig_status;
4864         u32 serdes_cfg, expected_sg_dig_ctrl;
4865         int workaround, port_a;
4866         int current_link_up;
4867
4868         serdes_cfg = 0;
4869         expected_sg_dig_ctrl = 0;
4870         workaround = 0;
4871         port_a = 1;
4872         current_link_up = 0;
4873
4874         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4875             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4876                 workaround = 1;
4877                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4878                         port_a = 0;
4879
4880                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4881                 /* preserve bits 20-23 for voltage regulator */
4882                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4883         }
4884
4885         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4886
4887         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4888                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4889                         if (workaround) {
4890                                 u32 val = serdes_cfg;
4891
4892                                 if (port_a)
4893                                         val |= 0xc010000;
4894                                 else
4895                                         val |= 0x4010000;
4896                                 tw32_f(MAC_SERDES_CFG, val);
4897                         }
4898
4899                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4900                 }
4901                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4902                         tg3_setup_flow_control(tp, 0, 0);
4903                         current_link_up = 1;
4904                 }
4905                 goto out;
4906         }
4907
4908         /* Want auto-negotiation.  */
4909         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4910
4911         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4912         if (flowctrl & ADVERTISE_1000XPAUSE)
4913                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4914         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4915                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4916
4917         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4918                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4919                     tp->serdes_counter &&
4920                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4921                                     MAC_STATUS_RCVD_CFG)) ==
4922                      MAC_STATUS_PCS_SYNCED)) {
4923                         tp->serdes_counter--;
4924                         current_link_up = 1;
4925                         goto out;
4926                 }
4927 restart_autoneg:
4928                 if (workaround)
4929                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4930                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4931                 udelay(5);
4932                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4933
4934                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4935                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4936         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4937                                  MAC_STATUS_SIGNAL_DET)) {
4938                 sg_dig_status = tr32(SG_DIG_STATUS);
4939                 mac_status = tr32(MAC_STATUS);
4940
4941                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4942                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4943                         u32 local_adv = 0, remote_adv = 0;
4944
4945                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4946                                 local_adv |= ADVERTISE_1000XPAUSE;
4947                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4948                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4949
4950                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4951                                 remote_adv |= LPA_1000XPAUSE;
4952                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4953                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4954
4955                         tp->link_config.rmt_adv =
4956                                            mii_adv_to_ethtool_adv_x(remote_adv);
4957
4958                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4959                         current_link_up = 1;
4960                         tp->serdes_counter = 0;
4961                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4962                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4963                         if (tp->serdes_counter)
4964                                 tp->serdes_counter--;
4965                         else {
4966                                 if (workaround) {
4967                                         u32 val = serdes_cfg;
4968
4969                                         if (port_a)
4970                                                 val |= 0xc010000;
4971                                         else
4972                                                 val |= 0x4010000;
4973
4974                                         tw32_f(MAC_SERDES_CFG, val);
4975                                 }
4976
4977                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4978                                 udelay(40);
4979
4980                                 /* Link parallel detection - link is up
4981                                  * only if we have PCS_SYNC and are not
4982                                  * receiving config code words.  */
4983                                 mac_status = tr32(MAC_STATUS);
4984                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4985                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4986                                         tg3_setup_flow_control(tp, 0, 0);
4987                                         current_link_up = 1;
4988                                         tp->phy_flags |=
4989                                                 TG3_PHYFLG_PARALLEL_DETECT;
4990                                         tp->serdes_counter =
4991                                                 SERDES_PARALLEL_DET_TIMEOUT;
4992                                 } else
4993                                         goto restart_autoneg;
4994                         }
4995                 }
4996         } else {
4997                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4998                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4999         }
5000
5001 out:
5002         return current_link_up;
5003 }
5004
5005 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5006 {
5007         int current_link_up = 0;
5008
5009         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5010                 goto out;
5011
5012         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5013                 u32 txflags, rxflags;
5014                 int i;
5015
5016                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5017                         u32 local_adv = 0, remote_adv = 0;
5018
5019                         if (txflags & ANEG_CFG_PS1)
5020                                 local_adv |= ADVERTISE_1000XPAUSE;
5021                         if (txflags & ANEG_CFG_PS2)
5022                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5023
5024                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5025                                 remote_adv |= LPA_1000XPAUSE;
5026                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5027                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5028
5029                         tp->link_config.rmt_adv =
5030                                            mii_adv_to_ethtool_adv_x(remote_adv);
5031
5032                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5033
5034                         current_link_up = 1;
5035                 }
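                /* Wait for the sync/config change bits to clear, up to
                 * roughly 1.8 ms.
                 */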
5036                 for (i = 0; i < 30; i++) {
5037                         udelay(20);
5038                         tw32_f(MAC_STATUS,
5039                                (MAC_STATUS_SYNC_CHANGED |
5040                                 MAC_STATUS_CFG_CHANGED));
5041                         udelay(40);
5042                         if ((tr32(MAC_STATUS) &
5043                              (MAC_STATUS_SYNC_CHANGED |
5044                               MAC_STATUS_CFG_CHANGED)) == 0)
5045                                 break;
5046                 }
5047
5048                 mac_status = tr32(MAC_STATUS);
5049                 if (current_link_up == 0 &&
5050                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5051                     !(mac_status & MAC_STATUS_RCVD_CFG))
5052                         current_link_up = 1;
5053         } else {
5054                 tg3_setup_flow_control(tp, 0, 0);
5055
5056                 /* Forcing 1000FD link up. */
5057                 current_link_up = 1;
5058
5059                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5060                 udelay(40);
5061
5062                 tw32_f(MAC_MODE, tp->mac_mode);
5063                 udelay(40);
5064         }
5065
5066 out:
5067         return current_link_up;
5068 }
5069
5070 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5071 {
5072         u32 orig_pause_cfg;
5073         u16 orig_active_speed;
5074         u8 orig_active_duplex;
5075         u32 mac_status;
5076         int current_link_up;
5077         int i;
5078
5079         orig_pause_cfg = tp->link_config.active_flowctrl;
5080         orig_active_speed = tp->link_config.active_speed;
5081         orig_active_duplex = tp->link_config.active_duplex;
5082
5083         if (!tg3_flag(tp, HW_AUTONEG) &&
5084             netif_carrier_ok(tp->dev) &&
5085             tg3_flag(tp, INIT_COMPLETE)) {
5086                 mac_status = tr32(MAC_STATUS);
5087                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5088                                MAC_STATUS_SIGNAL_DET |
5089                                MAC_STATUS_CFG_CHANGED |
5090                                MAC_STATUS_RCVD_CFG);
5091                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5092                                    MAC_STATUS_SIGNAL_DET)) {
5093                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5094                                             MAC_STATUS_CFG_CHANGED));
5095                         return 0;
5096                 }
5097         }
5098
5099         tw32_f(MAC_TX_AUTO_NEG, 0);
5100
5101         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5102         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5103         tw32_f(MAC_MODE, tp->mac_mode);
5104         udelay(40);
5105
5106         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5107                 tg3_init_bcm8002(tp);
5108
5109         /* Enable link change event even when serdes polling.  */
5110         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5111         udelay(40);
5112
5113         current_link_up = 0;
5114         tp->link_config.rmt_adv = 0;
5115         mac_status = tr32(MAC_STATUS);
5116
5117         if (tg3_flag(tp, HW_AUTONEG))
5118                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5119         else
5120                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5121
5122         tp->napi[0].hw_status->status =
5123                 (SD_STATUS_UPDATED |
5124                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5125
5126         for (i = 0; i < 100; i++) {
5127                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5128                                     MAC_STATUS_CFG_CHANGED));
5129                 udelay(5);
5130                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5131                                          MAC_STATUS_CFG_CHANGED |
5132                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5133                         break;
5134         }
5135
5136         mac_status = tr32(MAC_STATUS);
5137         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5138                 current_link_up = 0;
5139                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5140                     tp->serdes_counter == 0) {
5141                         tw32_f(MAC_MODE, (tp->mac_mode |
5142                                           MAC_MODE_SEND_CONFIGS));
5143                         udelay(1);
5144                         tw32_f(MAC_MODE, tp->mac_mode);
5145                 }
5146         }
5147
5148         if (current_link_up == 1) {
5149                 tp->link_config.active_speed = SPEED_1000;
5150                 tp->link_config.active_duplex = DUPLEX_FULL;
5151                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5152                                     LED_CTRL_LNKLED_OVERRIDE |
5153                                     LED_CTRL_1000MBPS_ON));
5154         } else {
5155                 tp->link_config.active_speed = SPEED_UNKNOWN;
5156                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5157                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5158                                     LED_CTRL_LNKLED_OVERRIDE |
5159                                     LED_CTRL_TRAFFIC_OVERRIDE));
5160         }
5161
5162         if (current_link_up != netif_carrier_ok(tp->dev)) {
5163                 if (current_link_up)
5164                         netif_carrier_on(tp->dev);
5165                 else
5166                         netif_carrier_off(tp->dev);
5167                 tg3_link_report(tp);
5168         } else {
5169                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5170                 if (orig_pause_cfg != now_pause_cfg ||
5171                     orig_active_speed != tp->link_config.active_speed ||
5172                     orig_active_duplex != tp->link_config.active_duplex)
5173                         tg3_link_report(tp);
5174         }
5175
5176         return 0;
5177 }
5178
5179 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5180 {
5181         int current_link_up, err = 0;
5182         u32 bmsr, bmcr;
5183         u16 current_speed;
5184         u8 current_duplex;
5185         u32 local_adv, remote_adv;
5186
5187         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5188         tw32_f(MAC_MODE, tp->mac_mode);
5189         udelay(40);
5190
5191         tw32(MAC_EVENT, 0);
5192
5193         tw32_f(MAC_STATUS,
5194              (MAC_STATUS_SYNC_CHANGED |
5195               MAC_STATUS_CFG_CHANGED |
5196               MAC_STATUS_MI_COMPLETION |
5197               MAC_STATUS_LNKSTATE_CHANGED));
5198         udelay(40);
5199
5200         if (force_reset)
5201                 tg3_phy_reset(tp);
5202
5203         current_link_up = 0;
5204         current_speed = SPEED_UNKNOWN;
5205         current_duplex = DUPLEX_UNKNOWN;
5206         tp->link_config.rmt_adv = 0;
5207
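        /* BMSR is latched low; the second read returns the current
         * link status.
         */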
5208         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5209         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5210         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5211                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5212                         bmsr |= BMSR_LSTATUS;
5213                 else
5214                         bmsr &= ~BMSR_LSTATUS;
5215         }
5216
5217         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5218
5219         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5220             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5221                 /* do nothing, just check for link up at the end */
5222         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5223                 u32 adv, newadv;
5224
5225                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5226                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5227                                  ADVERTISE_1000XPAUSE |
5228                                  ADVERTISE_1000XPSE_ASYM |
5229                                  ADVERTISE_SLCT);
5230
5231                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5232                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5233
5234                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5235                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5236                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5237                         tg3_writephy(tp, MII_BMCR, bmcr);
5238
5239                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5240                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5241                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5242
5243                         return err;
5244                 }
5245         } else {
5246                 u32 new_bmcr;
5247
5248                 bmcr &= ~BMCR_SPEED1000;
5249                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5250
5251                 if (tp->link_config.duplex == DUPLEX_FULL)
5252                         new_bmcr |= BMCR_FULLDPLX;
5253
5254                 if (new_bmcr != bmcr) {
5255                         /* BMCR_SPEED1000 is a reserved bit that needs
5256                          * to be set on write.
5257                          */
5258                         new_bmcr |= BMCR_SPEED1000;
5259
5260                         /* Force a linkdown */
5261                         if (netif_carrier_ok(tp->dev)) {
5262                                 u32 adv;
5263
5264                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5265                                 adv &= ~(ADVERTISE_1000XFULL |
5266                                          ADVERTISE_1000XHALF |
5267                                          ADVERTISE_SLCT);
5268                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5269                                 tg3_writephy(tp, MII_BMCR, bmcr |
5270                                                            BMCR_ANRESTART |
5271                                                            BMCR_ANENABLE);
5272                                 udelay(10);
5273                                 netif_carrier_off(tp->dev);
5274                         }
5275                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5276                         bmcr = new_bmcr;
5277                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5278                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5279                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5280                             ASIC_REV_5714) {
5281                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5282                                         bmsr |= BMSR_LSTATUS;
5283                                 else
5284                                         bmsr &= ~BMSR_LSTATUS;
5285                         }
5286                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5287                 }
5288         }
5289
5290         if (bmsr & BMSR_LSTATUS) {
5291                 current_speed = SPEED_1000;
5292                 current_link_up = 1;
5293                 if (bmcr & BMCR_FULLDPLX)
5294                         current_duplex = DUPLEX_FULL;
5295                 else
5296                         current_duplex = DUPLEX_HALF;
5297
5298                 local_adv = 0;
5299                 remote_adv = 0;
5300
5301                 if (bmcr & BMCR_ANENABLE) {
5302                         u32 common;
5303
5304                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5305                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5306                         common = local_adv & remote_adv;
5307                         if (common & (ADVERTISE_1000XHALF |
5308                                       ADVERTISE_1000XFULL)) {
5309                                 if (common & ADVERTISE_1000XFULL)
5310                                         current_duplex = DUPLEX_FULL;
5311                                 else
5312                                         current_duplex = DUPLEX_HALF;
5313
5314                                 tp->link_config.rmt_adv =
5315                                            mii_adv_to_ethtool_adv_x(remote_adv);
5316                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5317                                 /* Link is up via parallel detect */
5318                         } else {
5319                                 current_link_up = 0;
5320                         }
5321                 }
5322         }
5323
5324         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5325                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5326
5327         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5328         if (tp->link_config.active_duplex == DUPLEX_HALF)
5329                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5330
5331         tw32_f(MAC_MODE, tp->mac_mode);
5332         udelay(40);
5333
5334         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5335
5336         tp->link_config.active_speed = current_speed;
5337         tp->link_config.active_duplex = current_duplex;
5338
5339         if (current_link_up != netif_carrier_ok(tp->dev)) {
5340                 if (current_link_up)
5341                         netif_carrier_on(tp->dev);
5342                 else {
5343                         netif_carrier_off(tp->dev);
5344                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5345                 }
5346                 tg3_link_report(tp);
5347         }
5348         return err;
5349 }
5350
5351 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5352 {
5353         if (tp->serdes_counter) {
5354                 /* Give autoneg time to complete. */
5355                 tp->serdes_counter--;
5356                 return;
5357         }
5358
5359         if (!netif_carrier_ok(tp->dev) &&
5360             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5361                 u32 bmcr;
5362
5363                 tg3_readphy(tp, MII_BMCR, &bmcr);
5364                 if (bmcr & BMCR_ANENABLE) {
5365                         u32 phy1, phy2;
5366
5367                         /* Select shadow register 0x1f */
5368                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5369                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5370
5371                         /* Select expansion interrupt status register */
5372                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5373                                          MII_TG3_DSP_EXP1_INT_STAT);
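                        /* Read twice; the first read presumably flushes
                         * stale latched status.
                         */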
5374                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5375                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5376
5377                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5378                                 /* We have signal detect and not receiving
5379                                  * config code words, link is up by parallel
5380                                  * detection.
5381                                  */
5382
5383                                 bmcr &= ~BMCR_ANENABLE;
5384                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5385                                 tg3_writephy(tp, MII_BMCR, bmcr);
5386                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5387                         }
5388                 }
5389         } else if (netif_carrier_ok(tp->dev) &&
5390                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5391                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5392                 u32 phy2;
5393
5394                 /* Select expansion interrupt status register */
5395                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5396                                  MII_TG3_DSP_EXP1_INT_STAT);
5397                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5398                 if (phy2 & 0x20) {
5399                         u32 bmcr;
5400
5401                         /* Config code words received, turn on autoneg. */
5402                         tg3_readphy(tp, MII_BMCR, &bmcr);
5403                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5404
5405                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5407                 }
5408         }
5409 }
5410
5411 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5412 {
5413         u32 val;
5414         int err;
5415
5416         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5417                 err = tg3_setup_fiber_phy(tp, force_reset);
5418         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5419                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5420         else
5421                 err = tg3_setup_copper_phy(tp, force_reset);
5422
5423         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5424                 u32 scale;
5425
5426                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5427                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5428                         scale = 65;
5429                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5430                         scale = 6;
5431                 else
5432                         scale = 12;
5433
5434                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5435                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5436                 tw32(GRC_MISC_CFG, val);
5437         }
5438
5439         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5440               (6 << TX_LENGTHS_IPG_SHIFT);
5441         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5442                 val |= tr32(MAC_TX_LENGTHS) &
5443                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5444                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5445
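        /* Half-duplex gigabit needs the extended slot time for carrier
         * extension; all other modes use the standard slot.
         */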
5446         if (tp->link_config.active_speed == SPEED_1000 &&
5447             tp->link_config.active_duplex == DUPLEX_HALF)
5448                 tw32(MAC_TX_LENGTHS, val |
5449                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5450         else
5451                 tw32(MAC_TX_LENGTHS, val |
5452                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5453
5454         if (!tg3_flag(tp, 5705_PLUS)) {
5455                 if (netif_carrier_ok(tp->dev)) {
5456                         tw32(HOSTCC_STAT_COAL_TICKS,
5457                              tp->coal.stats_block_coalesce_usecs);
5458                 } else {
5459                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5460                 }
5461         }
5462
5463         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5464                 val = tr32(PCIE_PWR_MGMT_THRESH);
5465                 if (!netif_carrier_ok(tp->dev))
5466                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5467                               tp->pwrmgmt_thresh;
5468                 else
5469                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5470                 tw32(PCIE_PWR_MGMT_THRESH, val);
5471         }
5472
5473         return err;
5474 }
5475
5476 static inline int tg3_irq_sync(struct tg3 *tp)
5477 {
5478         return tp->irq_sync;
5479 }
5480
5481 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5482 {
5483         int i;
5484
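        /* Offset the destination pointer so each register is stored at
         * its own offset within the dump buffer.
         */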
5485         dst = (u32 *)((u8 *)dst + off);
5486         for (i = 0; i < len; i += sizeof(u32))
5487                 *dst++ = tr32(off + i);
5488 }
5489
5490 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5491 {
5492         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5493         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5494         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5495         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5496         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5497         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5498         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5499         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5500         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5501         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5502         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5503         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5504         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5505         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5506         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5507         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5508         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5509         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5510         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5511
5512         if (tg3_flag(tp, SUPPORT_MSIX))
5513                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5514
5515         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5516         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5517         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5518         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5519         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5520         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5521         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5522         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5523
5524         if (!tg3_flag(tp, 5705_PLUS)) {
5525                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5526                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5527                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5528         }
5529
5530         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5531         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5532         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5533         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5534         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5535
5536         if (tg3_flag(tp, NVRAM))
5537                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5538 }
5539
5540 static void tg3_dump_state(struct tg3 *tp)
5541 {
5542         int i;
5543         u32 *regs;
5544
5545         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5546         if (!regs) {
5547                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5548                 return;
5549         }
5550
5551         if (tg3_flag(tp, PCI_EXPRESS)) {
5552                 /* Read up to but not including private PCI registers */
5553                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5554                         regs[i / sizeof(u32)] = tr32(i);
5555         } else
5556                 tg3_dump_legacy_regs(tp, regs);
5557
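        /* Dump four registers per line, skipping groups that read all
         * zero.
         */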
5558         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5559                 if (!regs[i + 0] && !regs[i + 1] &&
5560                     !regs[i + 2] && !regs[i + 3])
5561                         continue;
5562
5563                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5564                            i * 4,
5565                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5566         }
5567
5568         kfree(regs);
5569
5570         for (i = 0; i < tp->irq_cnt; i++) {
5571                 struct tg3_napi *tnapi = &tp->napi[i];
5572
5573                 /* SW status block */
5574                 netdev_err(tp->dev,
5575                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5576                            i,
5577                            tnapi->hw_status->status,
5578                            tnapi->hw_status->status_tag,
5579                            tnapi->hw_status->rx_jumbo_consumer,
5580                            tnapi->hw_status->rx_consumer,
5581                            tnapi->hw_status->rx_mini_consumer,
5582                            tnapi->hw_status->idx[0].rx_producer,
5583                            tnapi->hw_status->idx[0].tx_consumer);
5584
5585                 netdev_err(tp->dev,
5586                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5587                            i,
5588                            tnapi->last_tag, tnapi->last_irq_tag,
5589                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5590                            tnapi->rx_rcb_ptr,
5591                            tnapi->prodring.rx_std_prod_idx,
5592                            tnapi->prodring.rx_std_cons_idx,
5593                            tnapi->prodring.rx_jmb_prod_idx,
5594                            tnapi->prodring.rx_jmb_cons_idx);
5595         }
5596 }
5597
5598 /* This is called whenever we suspect that the system chipset is re-
5599  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5600  * is bogus tx completions. We try to recover by setting the
5601  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5602  * in the workqueue.
5603  */
5604 static void tg3_tx_recover(struct tg3 *tp)
5605 {
5606         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5607                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5608
5609         netdev_warn(tp->dev,
5610                     "The system may be re-ordering memory-mapped I/O "
5611                     "cycles to the network device, attempting to recover. "
5612                     "Please report the problem to the driver maintainer "
5613                     "and include system chipset information.\n");
5614
5615         spin_lock(&tp->lock);
5616         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5617         spin_unlock(&tp->lock);
5618 }
5619
5620 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5621 {
5622         /* Tell compiler to fetch tx indices from memory. */
5623         barrier();
5624         return tnapi->tx_pending -
5625                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5626 }
5627
5628 /* Tigon3 never reports partial packet sends.  So we do not
5629  * need special logic to handle SKBs that have not had all
5630  * of their frags sent yet, like SunGEM does.
5631  */
5632 static void tg3_tx(struct tg3_napi *tnapi)
5633 {
5634         struct tg3 *tp = tnapi->tp;
5635         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5636         u32 sw_idx = tnapi->tx_cons;
5637         struct netdev_queue *txq;
5638         int index = tnapi - tp->napi;
5639         unsigned int pkts_compl = 0, bytes_compl = 0;
5640
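        /* With TSS, tp->napi[0] carries no TX ring, so the TX queue
         * index is one less than the NAPI index.
         */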
5641         if (tg3_flag(tp, ENABLE_TSS))
5642                 index--;
5643
5644         txq = netdev_get_tx_queue(tp->dev, index);
5645
5646         while (sw_idx != hw_idx) {
5647                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5648                 struct sk_buff *skb = ri->skb;
5649                 int i, tx_bug = 0;
5650
5651                 if (unlikely(skb == NULL)) {
5652                         tg3_tx_recover(tp);
5653                         return;
5654                 }
5655
5656                 pci_unmap_single(tp->pdev,
5657                                  dma_unmap_addr(ri, mapping),
5658                                  skb_headlen(skb),
5659                                  PCI_DMA_TODEVICE);
5660
5661                 ri->skb = NULL;
5662
5663                 while (ri->fragmented) {
5664                         ri->fragmented = false;
5665                         sw_idx = NEXT_TX(sw_idx);
5666                         ri = &tnapi->tx_buffers[sw_idx];
5667                 }
5668
5669                 sw_idx = NEXT_TX(sw_idx);
5670
5671                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5672                         ri = &tnapi->tx_buffers[sw_idx];
5673                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5674                                 tx_bug = 1;
5675
5676                         pci_unmap_page(tp->pdev,
5677                                        dma_unmap_addr(ri, mapping),
5678                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5679                                        PCI_DMA_TODEVICE);
5680
5681                         while (ri->fragmented) {
5682                                 ri->fragmented = false;
5683                                 sw_idx = NEXT_TX(sw_idx);
5684                                 ri = &tnapi->tx_buffers[sw_idx];
5685                         }
5686
5687                         sw_idx = NEXT_TX(sw_idx);
5688                 }
5689
5690                 pkts_compl++;
5691                 bytes_compl += skb->len;
5692
5693                 dev_kfree_skb(skb);
5694
5695                 if (unlikely(tx_bug)) {
5696                         tg3_tx_recover(tp);
5697                         return;
5698                 }
5699         }
5700
5701         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5702
5703         tnapi->tx_cons = sw_idx;
5704
5705         /* Need to make the tx_cons update visible to tg3_start_xmit()
5706          * before checking for netif_queue_stopped().  Without the
5707          * memory barrier, there is a small possibility that tg3_start_xmit()
5708          * will miss it and cause the queue to be stopped forever.
5709          */
5710         smp_mb();
5711
5712         if (unlikely(netif_tx_queue_stopped(txq) &&
5713                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5714                 __netif_tx_lock(txq, smp_processor_id());
5715                 if (netif_tx_queue_stopped(txq) &&
5716                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5717                         netif_tx_wake_queue(txq);
5718                 __netif_tx_unlock(txq);
5719         }
5720 }
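
/* Sketch of the barrier pairing assumed by tg3_tx() above and the xmit
 * paths below (schematic, not driver code).  Each side publishes its
 * update, issues a full barrier, then re-checks the other side's state,
 * so at least one of the two re-checks observes the peer's update and
 * the queue cannot stay stopped forever:
 *
 *	producer (start_xmit)		consumer (tg3_tx)
 *	---------------------		-----------------
 *	netif_tx_stop_queue(txq);	tnapi->tx_cons = sw_idx;
 *	smp_mb();			smp_mb();
 *	re-check tg3_tx_avail();	re-check netif_tx_queue_stopped();
 */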
5721
5722 static void tg3_frag_free(bool is_frag, void *data)
5723 {
5724         if (is_frag)
5725                 put_page(virt_to_head_page(data));
5726         else
5727                 kfree(data);
5728 }
5729
5730 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5731 {
5732         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5733                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5734
5735         if (!ri->data)
5736                 return;
5737
5738         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5739                          map_sz, PCI_DMA_FROMDEVICE);
5740         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5741         ri->data = NULL;
5742 }
5743
5744
5745 /* Returns size of skb allocated or < 0 on error.
5746  *
5747  * We only need to fill in the address because the other members
5748  * of the RX descriptor are invariant, see tg3_init_rings.
5749  *
5750  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5751  * posting buffers we only dirty the first cache line of the RX
5752  * descriptor (containing the address).  Whereas for the RX status
5753  * buffers the cpu only reads the last cacheline of the RX descriptor
5754  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5755  */
5756 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5757                              u32 opaque_key, u32 dest_idx_unmasked,
5758                              unsigned int *frag_size)
5759 {
5760         struct tg3_rx_buffer_desc *desc;
5761         struct ring_info *map;
5762         u8 *data;
5763         dma_addr_t mapping;
5764         int skb_size, data_size, dest_idx;
5765
5766         switch (opaque_key) {
5767         case RXD_OPAQUE_RING_STD:
5768                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5769                 desc = &tpr->rx_std[dest_idx];
5770                 map = &tpr->rx_std_buffers[dest_idx];
5771                 data_size = tp->rx_pkt_map_sz;
5772                 break;
5773
5774         case RXD_OPAQUE_RING_JUMBO:
5775                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5776                 desc = &tpr->rx_jmb[dest_idx].std;
5777                 map = &tpr->rx_jmb_buffers[dest_idx];
5778                 data_size = TG3_RX_JMB_MAP_SZ;
5779                 break;
5780
5781         default:
5782                 return -EINVAL;
5783         }
5784
5785         /* Do not overwrite any of the map or rp information
5786          * until we are sure we can commit to a new buffer.
5787          *
5788          * Callers depend upon this behavior and assume that
5789          * we leave everything unchanged if we fail.
5790          */
5791         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5792                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5793         if (skb_size <= PAGE_SIZE) {
5794                 data = netdev_alloc_frag(skb_size);
5795                 *frag_size = skb_size;
5796         } else {
5797                 data = kmalloc(skb_size, GFP_ATOMIC);
5798                 *frag_size = 0;
5799         }
5800         if (!data)
5801                 return -ENOMEM;
5802
5803         mapping = pci_map_single(tp->pdev,
5804                                  data + TG3_RX_OFFSET(tp),
5805                                  data_size,
5806                                  PCI_DMA_FROMDEVICE);
5807         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5808                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5809                 return -EIO;
5810         }
5811
5812         map->data = data;
5813         dma_unmap_addr_set(map, mapping, mapping);
5814
5815         desc->addr_hi = ((u64)mapping >> 32);
5816         desc->addr_lo = ((u64)mapping & 0xffffffff);
5817
5818         return data_size;
5819 }
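
/* Illustrative sketch of the allocation-size decision above (hypothetical
 * helper, not driver code): buffers whose total footprint fits in one page
 * can come from the cheap page-fragment allocator, while jumbo-sized
 * buffers fall back to kmalloc(); tg3_frag_free() later frees accordingly.
 *
 *	static bool example_uses_frag_alloc(struct tg3 *tp, int data_size)
 *	{
 *		int skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
 *			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *
 *		return skb_size <= PAGE_SIZE;
 *	}
 */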
5820
5821 /* We only need to copy over the address because the other
5822  * members of the RX descriptor are invariant.  See notes above
5823  * tg3_alloc_rx_data for full details.
5824  */
5825 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5826                            struct tg3_rx_prodring_set *dpr,
5827                            u32 opaque_key, int src_idx,
5828                            u32 dest_idx_unmasked)
5829 {
5830         struct tg3 *tp = tnapi->tp;
5831         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5832         struct ring_info *src_map, *dest_map;
5833         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5834         int dest_idx;
5835
5836         switch (opaque_key) {
5837         case RXD_OPAQUE_RING_STD:
5838                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5839                 dest_desc = &dpr->rx_std[dest_idx];
5840                 dest_map = &dpr->rx_std_buffers[dest_idx];
5841                 src_desc = &spr->rx_std[src_idx];
5842                 src_map = &spr->rx_std_buffers[src_idx];
5843                 break;
5844
5845         case RXD_OPAQUE_RING_JUMBO:
5846                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5847                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5848                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5849                 src_desc = &spr->rx_jmb[src_idx].std;
5850                 src_map = &spr->rx_jmb_buffers[src_idx];
5851                 break;
5852
5853         default:
5854                 return;
5855         }
5856
5857         dest_map->data = src_map->data;
5858         dma_unmap_addr_set(dest_map, mapping,
5859                            dma_unmap_addr(src_map, mapping));
5860         dest_desc->addr_hi = src_desc->addr_hi;
5861         dest_desc->addr_lo = src_desc->addr_lo;
5862
5863         /* Ensure that the update to the skb happens after the physical
5864          * addresses have been transferred to the new BD location.
5865          */
5866         smp_wmb();
5867
5868         src_map->data = NULL;
5869 }
5870
5871 /* The RX ring scheme is composed of multiple rings which post fresh
5872  * buffers to the chip, and one special ring the chip uses to report
5873  * status back to the host.
5874  *
5875  * The special ring reports the status of received packets to the
5876  * host.  The chip does not write into the original descriptor the
5877  * RX buffer was obtained from.  The chip simply takes the original
5878  * descriptor as provided by the host, updates the status and length
5879  * field, then writes this into the next status ring entry.
5880  *
5881  * Each ring the host uses to post buffers to the chip is described
5882  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5883  * it is first placed into the on-chip ram.  When the packet's length
5884  * is known, it walks down the TG3_BDINFO entries to select the ring.
5885  * Each TG3_BDINFO specifies a MAXLEN field; the first TG3_BDINFO
5886  * whose MAXLEN accommodates the new packet's length is chosen.
5887  *
5888  * The "separate ring for rx status" scheme may sound queer, but it makes
5889  * sense from a cache coherency perspective.  If only the host writes
5890  * to the buffer post rings, and only the chip writes to the rx status
5891  * rings, then cache lines never move beyond shared-modified state.
5892  * If both the host and chip were to write into the same ring, cache line
5893  * eviction could occur since both entities want it in an exclusive state.
5894  */
5895 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5896 {
5897         struct tg3 *tp = tnapi->tp;
5898         u32 work_mask, rx_std_posted = 0;
5899         u32 std_prod_idx, jmb_prod_idx;
5900         u32 sw_idx = tnapi->rx_rcb_ptr;
5901         u16 hw_idx;
5902         int received;
5903         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5904
5905         hw_idx = *(tnapi->rx_rcb_prod_idx);
5906         /*
5907          * We need to order the read of hw_idx and the read of
5908          * the opaque cookie.
5909          */
5910         rmb();
5911         work_mask = 0;
5912         received = 0;
5913         std_prod_idx = tpr->rx_std_prod_idx;
5914         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5915         while (sw_idx != hw_idx && budget > 0) {
5916                 struct ring_info *ri;
5917                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5918                 unsigned int len;
5919                 struct sk_buff *skb;
5920                 dma_addr_t dma_addr;
5921                 u32 opaque_key, desc_idx, *post_ptr;
5922                 u8 *data;
5923
5924                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5925                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5926                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5927                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5928                         dma_addr = dma_unmap_addr(ri, mapping);
5929                         data = ri->data;
5930                         post_ptr = &std_prod_idx;
5931                         rx_std_posted++;
5932                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5933                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5934                         dma_addr = dma_unmap_addr(ri, mapping);
5935                         data = ri->data;
5936                         post_ptr = &jmb_prod_idx;
5937                 } else
5938                         goto next_pkt_nopost;
5939
5940                 work_mask |= opaque_key;
5941
5942                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5943                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5944                 drop_it:
5945                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5946                                        desc_idx, *post_ptr);
5947                 drop_it_no_recycle:
5948                         /* Other statistics are tracked by the card. */
5949                         tp->rx_dropped++;
5950                         goto next_pkt;
5951                 }
5952
5953                 prefetch(data + TG3_RX_OFFSET(tp));
5954                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5955                       ETH_FCS_LEN;
5956
5957                 if (len > TG3_RX_COPY_THRESH(tp)) {
5958                         int skb_size;
5959                         unsigned int frag_size;
5960
5961                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5962                                                     *post_ptr, &frag_size);
5963                         if (skb_size < 0)
5964                                 goto drop_it;
5965
5966                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5967                                          PCI_DMA_FROMDEVICE);
5968
5969                         skb = build_skb(data, frag_size);
5970                         if (!skb) {
5971                                 tg3_frag_free(frag_size != 0, data);
5972                                 goto drop_it_no_recycle;
5973                         }
5974                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5975                         /* Ensure that the update to the data happens
5976                          * after the usage of the old DMA mapping.
5977                          */
5978                         smp_wmb();
5979
5980                         ri->data = NULL;
5981
5982                 } else {
5983                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5984                                        desc_idx, *post_ptr);
5985
5986                         skb = netdev_alloc_skb(tp->dev,
5987                                                len + TG3_RAW_IP_ALIGN);
5988                         if (skb == NULL)
5989                                 goto drop_it_no_recycle;
5990
5991                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
5992                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5993                         memcpy(skb->data,
5994                                data + TG3_RX_OFFSET(tp),
5995                                len);
5996                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5997                 }
5998
5999                 skb_put(skb, len);
6000                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6001                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6002                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6003                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6004                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6005                 else
6006                         skb_checksum_none_assert(skb);
6007
6008                 skb->protocol = eth_type_trans(skb, tp->dev);
6009
6010                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6011                     skb->protocol != htons(ETH_P_8021Q)) {
6012                         dev_kfree_skb(skb);
6013                         goto drop_it_no_recycle;
6014                 }
6015
6016                 if (desc->type_flags & RXD_FLAG_VLAN &&
6017                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6018                         __vlan_hwaccel_put_tag(skb,
6019                                                desc->err_vlan & RXD_VLAN_MASK);
6020
6021                 napi_gro_receive(&tnapi->napi, skb);
6022
6023                 received++;
6024                 budget--;
6025
6026 next_pkt:
6027                 (*post_ptr)++;
6028
6029                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6030                         tpr->rx_std_prod_idx = std_prod_idx &
6031                                                tp->rx_std_ring_mask;
6032                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6033                                      tpr->rx_std_prod_idx);
6034                         work_mask &= ~RXD_OPAQUE_RING_STD;
6035                         rx_std_posted = 0;
6036                 }
6037 next_pkt_nopost:
6038                 sw_idx++;
6039                 sw_idx &= tp->rx_ret_ring_mask;
6040
6041                 /* Refresh hw_idx to see if there is new work */
6042                 if (sw_idx == hw_idx) {
6043                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6044                         rmb();
6045                 }
6046         }
6047
6048         /* ACK the status ring. */
6049         tnapi->rx_rcb_ptr = sw_idx;
6050         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6051
6052         /* Refill RX ring(s). */
6053         if (!tg3_flag(tp, ENABLE_RSS)) {
6054                 /* Sync BD data before updating mailbox */
6055                 wmb();
6056
6057                 if (work_mask & RXD_OPAQUE_RING_STD) {
6058                         tpr->rx_std_prod_idx = std_prod_idx &
6059                                                tp->rx_std_ring_mask;
6060                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6061                                      tpr->rx_std_prod_idx);
6062                 }
6063                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6064                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6065                                                tp->rx_jmb_ring_mask;
6066                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6067                                      tpr->rx_jmb_prod_idx);
6068                 }
6069                 mmiowb();
6070         } else if (work_mask) {
6071                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6072                  * updated before the producer indices can be updated.
6073                  */
6074                 smp_wmb();
6075
6076                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6077                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6078
6079                 if (tnapi != &tp->napi[1]) {
6080                         tp->rx_refill = true;
6081                         napi_schedule(&tp->napi[1].napi);
6082                 }
6083         }
6084
6085         return received;
6086 }
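
/* Schematic of the RX ring scheme described above tg3_rx() (illustration
 * only, not driver code):
 *
 *	host writes				chip writes
 *	-----------				-----------
 *	std/jumbo producer rings	-->	buffer selection via
 *	(fresh buffer addresses)		TG3_BDINFO MAXLEN walk
 *						    |
 *						    v
 *	tg3_rx() consumes status	<--	rx return (status) ring
 *	ring, recycles or replaces		(len, err/vlan, checksum,
 *	buffers, bumps producers		 opaque cookie)
 */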
6087
6088 static void tg3_poll_link(struct tg3 *tp)
6089 {
6090         /* handle link change and other phy events */
6091         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6092                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6093
6094                 if (sblk->status & SD_STATUS_LINK_CHG) {
6095                         sblk->status = SD_STATUS_UPDATED |
6096                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6097                         spin_lock(&tp->lock);
6098                         if (tg3_flag(tp, USE_PHYLIB)) {
6099                                 tw32_f(MAC_STATUS,
6100                                      (MAC_STATUS_SYNC_CHANGED |
6101                                       MAC_STATUS_CFG_CHANGED |
6102                                       MAC_STATUS_MI_COMPLETION |
6103                                       MAC_STATUS_LNKSTATE_CHANGED));
6104                                 udelay(40);
6105                         } else
6106                                 tg3_setup_phy(tp, 0);
6107                         spin_unlock(&tp->lock);
6108                 }
6109         }
6110 }
6111
6112 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6113                                 struct tg3_rx_prodring_set *dpr,
6114                                 struct tg3_rx_prodring_set *spr)
6115 {
6116         u32 si, di, cpycnt, src_prod_idx;
6117         int i, err = 0;
6118
6119         while (1) {
6120                 src_prod_idx = spr->rx_std_prod_idx;
6121
6122                 /* Make sure updates to the rx_std_buffers[] entries and the
6123                  * standard producer index are seen in the correct order.
6124                  */
6125                 smp_rmb();
6126
6127                 if (spr->rx_std_cons_idx == src_prod_idx)
6128                         break;
6129
6130                 if (spr->rx_std_cons_idx < src_prod_idx)
6131                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6132                 else
6133                         cpycnt = tp->rx_std_ring_mask + 1 -
6134                                  spr->rx_std_cons_idx;
6135
6136                 cpycnt = min(cpycnt,
6137                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6138
6139                 si = spr->rx_std_cons_idx;
6140                 di = dpr->rx_std_prod_idx;
6141
6142                 for (i = di; i < di + cpycnt; i++) {
6143                         if (dpr->rx_std_buffers[i].data) {
6144                                 cpycnt = i - di;
6145                                 err = -ENOSPC;
6146                                 break;
6147                         }
6148                 }
6149
6150                 if (!cpycnt)
6151                         break;
6152
6153                 /* Ensure that updates to the rx_std_buffers ring and the
6154                  * shadowed hardware producer ring from tg3_recycle_skb() are
6155                  * ordered correctly WRT the skb check above.
6156                  */
6157                 smp_rmb();
6158
6159                 memcpy(&dpr->rx_std_buffers[di],
6160                        &spr->rx_std_buffers[si],
6161                        cpycnt * sizeof(struct ring_info));
6162
6163                 for (i = 0; i < cpycnt; i++, di++, si++) {
6164                         struct tg3_rx_buffer_desc *sbd, *dbd;
6165                         sbd = &spr->rx_std[si];
6166                         dbd = &dpr->rx_std[di];
6167                         dbd->addr_hi = sbd->addr_hi;
6168                         dbd->addr_lo = sbd->addr_lo;
6169                 }
6170
6171                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6172                                        tp->rx_std_ring_mask;
6173                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6174                                        tp->rx_std_ring_mask;
6175         }
6176
6177         while (1) {
6178                 src_prod_idx = spr->rx_jmb_prod_idx;
6179
6180                 /* Make sure updates to the rx_jmb_buffers[] entries and
6181                  * the jumbo producer index are seen in the correct order.
6182                  */
6183                 smp_rmb();
6184
6185                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6186                         break;
6187
6188                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6189                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6190                 else
6191                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6192                                  spr->rx_jmb_cons_idx;
6193
6194                 cpycnt = min(cpycnt,
6195                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6196
6197                 si = spr->rx_jmb_cons_idx;
6198                 di = dpr->rx_jmb_prod_idx;
6199
6200                 for (i = di; i < di + cpycnt; i++) {
6201                         if (dpr->rx_jmb_buffers[i].data) {
6202                                 cpycnt = i - di;
6203                                 err = -ENOSPC;
6204                                 break;
6205                         }
6206                 }
6207
6208                 if (!cpycnt)
6209                         break;
6210
6211                 /* Ensure that updates to the rx_jmb_buffers ring and the
6212                  * shadowed hardware producer ring from tg3_recycle_skb() are
6213                  * ordered correctly WRT the skb check above.
6214                  */
6215                 smp_rmb();
6216
6217                 memcpy(&dpr->rx_jmb_buffers[di],
6218                        &spr->rx_jmb_buffers[si],
6219                        cpycnt * sizeof(struct ring_info));
6220
6221                 for (i = 0; i < cpycnt; i++, di++, si++) {
6222                         struct tg3_rx_buffer_desc *sbd, *dbd;
6223                         sbd = &spr->rx_jmb[si].std;
6224                         dbd = &dpr->rx_jmb[di].std;
6225                         dbd->addr_hi = sbd->addr_hi;
6226                         dbd->addr_lo = sbd->addr_lo;
6227                 }
6228
6229                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6230                                        tp->rx_jmb_ring_mask;
6231                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6232                                        tp->rx_jmb_ring_mask;
6233         }
6234
6235         return err;
6236 }
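
/* Illustrative example of the cpycnt computation above (hypothetical
 * values): a single memcpy never crosses the source ring's wrap point.
 * With a 512-entry ring (mask 511), cons_idx = 500 and prod_idx = 10,
 * the producer has wrapped, so the first pass copies 512 - 500 = 12
 * entries; the loop then comes around again from index 0 to move the
 * remaining 10.
 */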
6237
6238 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6239 {
6240         struct tg3 *tp = tnapi->tp;
6241
6242         /* run TX completion thread */
6243         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6244                 tg3_tx(tnapi);
6245                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6246                         return work_done;
6247         }
6248
6249         if (!tnapi->rx_rcb_prod_idx)
6250                 return work_done;
6251
6252         /* run RX thread, within the bounds set by NAPI.
6253          * All RX "locking" is done by ensuring outside
6254          * code synchronizes with tg3->napi.poll()
6255          */
6256         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6257                 work_done += tg3_rx(tnapi, budget - work_done);
6258
6259         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6260                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6261                 int i, err = 0;
6262                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6263                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6264
6265                 tp->rx_refill = false;
6266                 for (i = 1; i <= tp->rxq_cnt; i++)
6267                         err |= tg3_rx_prodring_xfer(tp, dpr,
6268                                                     &tp->napi[i].prodring);
6269
6270                 wmb();
6271
6272                 if (std_prod_idx != dpr->rx_std_prod_idx)
6273                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6274                                      dpr->rx_std_prod_idx);
6275
6276                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6277                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6278                                      dpr->rx_jmb_prod_idx);
6279
6280                 mmiowb();
6281
6282                 if (err)
6283                         tw32_f(HOSTCC_MODE, tp->coal_now);
6284         }
6285
6286         return work_done;
6287 }
6288
6289 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6290 {
6291         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6292                 schedule_work(&tp->reset_task);
6293 }
6294
6295 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6296 {
6297         cancel_work_sync(&tp->reset_task);
6298         tg3_flag_clear(tp, RESET_TASK_PENDING);
6299         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6300 }
6301
6302 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6303 {
6304         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6305         struct tg3 *tp = tnapi->tp;
6306         int work_done = 0;
6307         struct tg3_hw_status *sblk = tnapi->hw_status;
6308
6309         while (1) {
6310                 work_done = tg3_poll_work(tnapi, work_done, budget);
6311
6312                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6313                         goto tx_recovery;
6314
6315                 if (unlikely(work_done >= budget))
6316                         break;
6317
6318                 /* tnapi->last_tag is written to the interrupt mailbox
6319                  * below to tell the hw how much work has been processed,
6320                  * so we must read it before checking for more work.
6321                  */
6322                 tnapi->last_tag = sblk->status_tag;
6323                 tnapi->last_irq_tag = tnapi->last_tag;
6324                 rmb();
6325
6326                 /* check for RX/TX work to do */
6327                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6328                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6329
6330                         /* This test here is not race-free, but will reduce
6331                          * the number of interrupts by looping again.
6332                          */
6333                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6334                                 continue;
6335
6336                         napi_complete(napi);
6337                         /* Reenable interrupts. */
6338                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6339
6340                         /* This test here is synchronized by napi_schedule()
6341                          * and napi_complete() to close the race condition.
6342                          */
6343                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6344                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6345                                                   HOSTCC_MODE_ENABLE |
6346                                                   tnapi->coal_now);
6347                         }
6348                         mmiowb();
6349                         break;
6350                 }
6351         }
6352
6353         return work_done;
6354
6355 tx_recovery:
6356         /* work_done is guaranteed to be less than budget. */
6357         napi_complete(napi);
6358         tg3_reset_task_schedule(tp);
6359         return work_done;
6360 }
6361
6362 static void tg3_process_error(struct tg3 *tp)
6363 {
6364         u32 val;
6365         bool real_error = false;
6366
6367         if (tg3_flag(tp, ERROR_PROCESSED))
6368                 return;
6369
6370         /* Check Flow Attention register */
6371         val = tr32(HOSTCC_FLOW_ATTN);
6372         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6373                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6374                 real_error = true;
6375         }
6376
6377         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6378                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6379                 real_error = true;
6380         }
6381
6382         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6383                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6384                 real_error = true;
6385         }
6386
6387         if (!real_error)
6388                 return;
6389
6390         tg3_dump_state(tp);
6391
6392         tg3_flag_set(tp, ERROR_PROCESSED);
6393         tg3_reset_task_schedule(tp);
6394 }
6395
6396 static int tg3_poll(struct napi_struct *napi, int budget)
6397 {
6398         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6399         struct tg3 *tp = tnapi->tp;
6400         int work_done = 0;
6401         struct tg3_hw_status *sblk = tnapi->hw_status;
6402
6403         while (1) {
6404                 if (sblk->status & SD_STATUS_ERROR)
6405                         tg3_process_error(tp);
6406
6407                 tg3_poll_link(tp);
6408
6409                 work_done = tg3_poll_work(tnapi, work_done, budget);
6410
6411                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6412                         goto tx_recovery;
6413
6414                 if (unlikely(work_done >= budget))
6415                         break;
6416
6417                 if (tg3_flag(tp, TAGGED_STATUS)) {
6418                         /* tnapi->last_tag is used in tg3_int_reenable() below
6419                          * to tell the hw how much work has been processed,
6420                          * so we must read it before checking for more work.
6421                          */
6422                         tnapi->last_tag = sblk->status_tag;
6423                         tnapi->last_irq_tag = tnapi->last_tag;
6424                         rmb();
6425                 } else
6426                         sblk->status &= ~SD_STATUS_UPDATED;
6427
6428                 if (likely(!tg3_has_work(tnapi))) {
6429                         napi_complete(napi);
6430                         tg3_int_reenable(tnapi);
6431                         break;
6432                 }
6433         }
6434
6435         return work_done;
6436
6437 tx_recovery:
6438         /* work_done is guaranteed to be less than budget. */
6439         napi_complete(napi);
6440         tg3_reset_task_schedule(tp);
6441         return work_done;
6442 }
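
/* Sketch of the NAPI contract both poll loops above follow (generic
 * skeleton with hypothetical example_* helpers, not driver code):
 * consume up to @budget packets; only when work_done < budget may the
 * handler call napi_complete() and re-enable device interrupts.
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = example_process_rings(napi, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			example_enable_irqs(napi);
 *		}
 *		return work_done;
 *	}
 */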
6443
6444 static void tg3_napi_disable(struct tg3 *tp)
6445 {
6446         int i;
6447
6448         for (i = tp->irq_cnt - 1; i >= 0; i--)
6449                 napi_disable(&tp->napi[i].napi);
6450 }
6451
6452 static void tg3_napi_enable(struct tg3 *tp)
6453 {
6454         int i;
6455
6456         for (i = 0; i < tp->irq_cnt; i++)
6457                 napi_enable(&tp->napi[i].napi);
6458 }
6459
6460 static void tg3_napi_init(struct tg3 *tp)
6461 {
6462         int i;
6463
6464         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6465         for (i = 1; i < tp->irq_cnt; i++)
6466                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6467 }
6468
6469 static void tg3_napi_fini(struct tg3 *tp)
6470 {
6471         int i;
6472
6473         for (i = 0; i < tp->irq_cnt; i++)
6474                 netif_napi_del(&tp->napi[i].napi);
6475 }
6476
6477 static inline void tg3_netif_stop(struct tg3 *tp)
6478 {
6479         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6480         tg3_napi_disable(tp);
6481         netif_tx_disable(tp->dev);
6482 }
6483
6484 static inline void tg3_netif_start(struct tg3 *tp)
6485 {
6486         /* NOTE: unconditional netif_tx_wake_all_queues is only
6487          * appropriate so long as all callers are assured to
6488          * have free tx slots (such as after tg3_init_hw)
6489          */
6490         netif_tx_wake_all_queues(tp->dev);
6491
6492         tg3_napi_enable(tp);
6493         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6494         tg3_enable_ints(tp);
6495 }
6496
6497 static void tg3_irq_quiesce(struct tg3 *tp)
6498 {
6499         int i;
6500
6501         BUG_ON(tp->irq_sync);
6502
6503         tp->irq_sync = 1;
6504         smp_mb();
6505
6506         for (i = 0; i < tp->irq_cnt; i++)
6507                 synchronize_irq(tp->napi[i].irq_vec);
6508 }
6509
6510 /* Fully shut down all tg3 driver activity elsewhere in the system.
6511  * If irq_sync is non-zero, the IRQ handlers must be quiesced as
6512  * well.  Most of the time this is not necessary, except when
6513  * shutting down the device.
6514  */
6515 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6516 {
6517         spin_lock_bh(&tp->lock);
6518         if (irq_sync)
6519                 tg3_irq_quiesce(tp);
6520 }
6521
6522 static inline void tg3_full_unlock(struct tg3 *tp)
6523 {
6524         spin_unlock_bh(&tp->lock);
6525 }
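
/* Typical usage of the pair above (sketch): slow paths that reconfigure
 * the chip and must not race the IRQ handlers pass irq_sync = 1,
 *
 *	tg3_full_lock(tp, 1);
 *	... reprogram the device ...
 *	tg3_full_unlock(tp);
 *
 * while paths that only need mutual exclusion against other BH-context
 * users of tp->lock pass irq_sync = 0.
 */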
6526
6527 /* One-shot MSI handler - Chip automatically disables interrupt
6528  * after sending MSI so driver doesn't have to do it.
6529  */
6530 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6531 {
6532         struct tg3_napi *tnapi = dev_id;
6533         struct tg3 *tp = tnapi->tp;
6534
6535         prefetch(tnapi->hw_status);
6536         if (tnapi->rx_rcb)
6537                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6538
6539         if (likely(!tg3_irq_sync(tp)))
6540                 napi_schedule(&tnapi->napi);
6541
6542         return IRQ_HANDLED;
6543 }
6544
6545 /* MSI ISR - No need to check for interrupt sharing and no need to
6546  * flush status block and interrupt mailbox. PCI ordering rules
6547  * guarantee that MSI will arrive after the status block.
6548  */
6549 static irqreturn_t tg3_msi(int irq, void *dev_id)
6550 {
6551         struct tg3_napi *tnapi = dev_id;
6552         struct tg3 *tp = tnapi->tp;
6553
6554         prefetch(tnapi->hw_status);
6555         if (tnapi->rx_rcb)
6556                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6557         /*
6558          * Writing any value to intr-mbox-0 clears PCI INTA# and
6559          * chip-internal interrupt pending events.
6560          * Writing non-zero to intr-mbox-0 additionally tells the
6561          * NIC to stop sending us irqs, engaging "in-intr-handler"
6562          * event coalescing.
6563          */
6564         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6565         if (likely(!tg3_irq_sync(tp)))
6566                 napi_schedule(&tnapi->napi);
6567
6568         return IRQ_RETVAL(1);
6569 }
6570
6571 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6572 {
6573         struct tg3_napi *tnapi = dev_id;
6574         struct tg3 *tp = tnapi->tp;
6575         struct tg3_hw_status *sblk = tnapi->hw_status;
6576         unsigned int handled = 1;
6577
6578         /* In INTx mode, it is possible for the interrupt to arrive at
6579          * the CPU before the status block posted prior to it is visible.
6580          * Reading the PCI State register will confirm whether the
6581          * interrupt is ours and will flush the status block.
6582          */
6583         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6584                 if (tg3_flag(tp, CHIP_RESETTING) ||
6585                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6586                         handled = 0;
6587                         goto out;
6588                 }
6589         }
6590
6591         /*
6592          * Writing any value to intr-mbox-0 clears PCI INTA# and
6593          * chip-internal interrupt pending events.
6594          * Writing non-zero to intr-mbox-0 additionally tells the
6595          * NIC to stop sending us irqs, engaging "in-intr-handler"
6596          * event coalescing.
6597          *
6598          * Flush the mailbox to de-assert the IRQ immediately to prevent
6599          * spurious interrupts.  The flush impacts performance but
6600          * excessive spurious interrupts can be worse in some cases.
6601          */
6602         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6603         if (tg3_irq_sync(tp))
6604                 goto out;
6605         sblk->status &= ~SD_STATUS_UPDATED;
6606         if (likely(tg3_has_work(tnapi))) {
6607                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6608                 napi_schedule(&tnapi->napi);
6609         } else {
6610                 /* No work, shared interrupt perhaps?  re-enable
6611                  * interrupts, and flush that PCI write
6612                  */
6613                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6614                                0x00000000);
6615         }
6616 out:
6617         return IRQ_RETVAL(handled);
6618 }
6619
6620 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6621 {
6622         struct tg3_napi *tnapi = dev_id;
6623         struct tg3 *tp = tnapi->tp;
6624         struct tg3_hw_status *sblk = tnapi->hw_status;
6625         unsigned int handled = 1;
6626
6627         /* In INTx mode, it is possible for the interrupt to arrive at
6628          * the CPU before the status block posted prior to it is visible.
6629          * Reading the PCI State register will confirm whether the
6630          * interrupt is ours and will flush the status block.
6631          */
6632         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6633                 if (tg3_flag(tp, CHIP_RESETTING) ||
6634                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6635                         handled = 0;
6636                         goto out;
6637                 }
6638         }
6639
6640         /*
6641          * Writing any value to intr-mbox-0 clears PCI INTA# and
6642          * chip-internal interrupt pending events.
6643          * Writing non-zero to intr-mbox-0 additionally tells the
6644          * NIC to stop sending us irqs, engaging "in-intr-handler"
6645          * event coalescing.
6646          *
6647          * Flush the mailbox to de-assert the IRQ immediately to prevent
6648          * spurious interrupts.  The flush impacts performance but
6649          * excessive spurious interrupts can be worse in some cases.
6650          */
6651         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6652
6653         /*
6654          * In a shared interrupt configuration, sometimes other devices'
6655          * interrupts will scream.  We record the current status tag here
6656          * so that the above check can report that the screaming interrupts
6657          * are unhandled.  Eventually they will be silenced.
6658          */
6659         tnapi->last_irq_tag = sblk->status_tag;
6660
6661         if (tg3_irq_sync(tp))
6662                 goto out;
6663
6664         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6665
6666         napi_schedule(&tnapi->napi);
6667
6668 out:
6669         return IRQ_RETVAL(handled);
6670 }
6671
6672 /* ISR for interrupt test */
6673 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6674 {
6675         struct tg3_napi *tnapi = dev_id;
6676         struct tg3 *tp = tnapi->tp;
6677         struct tg3_hw_status *sblk = tnapi->hw_status;
6678
6679         if ((sblk->status & SD_STATUS_UPDATED) ||
6680             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6681                 tg3_disable_ints(tp);
6682                 return IRQ_RETVAL(1);
6683         }
6684         return IRQ_RETVAL(0);
6685 }
6686
6687 #ifdef CONFIG_NET_POLL_CONTROLLER
6688 static void tg3_poll_controller(struct net_device *dev)
6689 {
6690         int i;
6691         struct tg3 *tp = netdev_priv(dev);
6692
6693         for (i = 0; i < tp->irq_cnt; i++)
6694                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6695 }
6696 #endif
6697
6698 static void tg3_tx_timeout(struct net_device *dev)
6699 {
6700         struct tg3 *tp = netdev_priv(dev);
6701
6702         if (netif_msg_tx_err(tp)) {
6703                 netdev_err(dev, "transmit timed out, resetting\n");
6704                 tg3_dump_state(tp);
6705         }
6706
6707         tg3_reset_task_schedule(tp);
6708 }
6709
6710 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
6711 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6712 {
6713         u32 base = (u32) mapping & 0xffffffff;
6714
6715         return (base > 0xffffdcc0) && (base + len + 8 < base);
6716 }
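
/* Worked example for the test above (illustration only): the check
 * exploits 32-bit wraparound.  For base = 0xffffff00 and len = 0x200,
 * base + len + 8 truncates to 0x00000108, which is < base, so the buffer
 * is flagged as crossing a 4GB boundary.  The base > 0xffffdcc0 pre-check
 * cheaply rejects bases too far from the boundary to wrap, presumably
 * sized to the largest fragment the hardware will be asked to map.
 */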
6717
6718 /* Test for DMA addresses > 40-bit */
6719 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6720                                           int len)
6721 {
6722 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6723         if (tg3_flag(tp, 40BIT_DMA_BUG))
6724                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6725         return 0;
6726 #else
6727         return 0;
6728 #endif
6729 }
6730
6731 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6732                                  dma_addr_t mapping, u32 len, u32 flags,
6733                                  u32 mss, u32 vlan)
6734 {
6735         txbd->addr_hi = ((u64) mapping >> 32);
6736         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6737         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6738         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6739 }
6740
6741 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6742                             dma_addr_t map, u32 len, u32 flags,
6743                             u32 mss, u32 vlan)
6744 {
6745         struct tg3 *tp = tnapi->tp;
6746         bool hwbug = false;
6747
6748         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6749                 hwbug = true;
6750
6751         if (tg3_4g_overflow_test(map, len))
6752                 hwbug = true;
6753
6754         if (tg3_40bit_overflow_test(tp, map, len))
6755                 hwbug = true;
6756
6757         if (tp->dma_limit) {
6758                 u32 prvidx = *entry;
6759                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6760                 while (len > tp->dma_limit && *budget) {
6761                         u32 frag_len = tp->dma_limit;
6762                         len -= tp->dma_limit;
6763
6764                         /* Avoid the 8-byte DMA problem */
6765                         if (len <= 8) {
6766                                 len += tp->dma_limit / 2;
6767                                 frag_len = tp->dma_limit / 2;
6768                         }
6769
6770                         tnapi->tx_buffers[*entry].fragmented = true;
6771
6772                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6773                                       frag_len, tmp_flag, mss, vlan);
6774                         *budget -= 1;
6775                         prvidx = *entry;
6776                         *entry = NEXT_TX(*entry);
6777
6778                         map += frag_len;
6779                 }
6780
6781                 if (len) {
6782                         if (*budget) {
6783                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6784                                               len, flags, mss, vlan);
6785                                 *budget -= 1;
6786                                 *entry = NEXT_TX(*entry);
6787                         } else {
6788                                 hwbug = true;
6789                                 tnapi->tx_buffers[prvidx].fragmented = false;
6790                         }
6791                 }
6792         } else {
6793                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6794                               len, flags, mss, vlan);
6795                 *entry = NEXT_TX(*entry);
6796         }
6797
6798         return hwbug;
6799 }
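
/* Illustrative example of the dma_limit splitting above (hypothetical
 * values): with dma_limit = 4096 and a 4100-byte fragment, a naive split
 * would leave a 4-byte tail and trip the 8-byte DMA bug.  Instead the
 * loop emits a 2048-byte BD and carries len = 2052 forward, so no
 * descriptor shorter than 8 bytes is ever produced.
 */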
6800
6801 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6802 {
6803         int i;
6804         struct sk_buff *skb;
6805         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6806
6807         skb = txb->skb;
6808         txb->skb = NULL;
6809
6810         pci_unmap_single(tnapi->tp->pdev,
6811                          dma_unmap_addr(txb, mapping),
6812                          skb_headlen(skb),
6813                          PCI_DMA_TODEVICE);
6814
6815         while (txb->fragmented) {
6816                 txb->fragmented = false;
6817                 entry = NEXT_TX(entry);
6818                 txb = &tnapi->tx_buffers[entry];
6819         }
6820
6821         for (i = 0; i <= last; i++) {
6822                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6823
6824                 entry = NEXT_TX(entry);
6825                 txb = &tnapi->tx_buffers[entry];
6826
6827                 pci_unmap_page(tnapi->tp->pdev,
6828                                dma_unmap_addr(txb, mapping),
6829                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6830
6831                 while (txb->fragmented) {
6832                         txb->fragmented = false;
6833                         entry = NEXT_TX(entry);
6834                         txb = &tnapi->tx_buffers[entry];
6835                 }
6836         }
6837 }
6838
6839 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6840 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6841                                        struct sk_buff **pskb,
6842                                        u32 *entry, u32 *budget,
6843                                        u32 base_flags, u32 mss, u32 vlan)
6844 {
6845         struct tg3 *tp = tnapi->tp;
6846         struct sk_buff *new_skb, *skb = *pskb;
6847         dma_addr_t new_addr = 0;
6848         int ret = 0;
6849
6850         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6851                 new_skb = skb_copy(skb, GFP_ATOMIC);
6852         else {
6853                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6854
6855                 new_skb = skb_copy_expand(skb,
6856                                           skb_headroom(skb) + more_headroom,
6857                                           skb_tailroom(skb), GFP_ATOMIC);
6858         }
6859
6860         if (!new_skb) {
6861                 ret = -1;
6862         } else {
6863                 /* New SKB is guaranteed to be linear. */
6864                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6865                                           PCI_DMA_TODEVICE);
6866                 /* Make sure the mapping succeeded */
6867                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6868                         dev_kfree_skb(new_skb);
6869                         ret = -1;
6870                 } else {
6871                         u32 save_entry = *entry;
6872
6873                         base_flags |= TXD_FLAG_END;
6874
6875                         tnapi->tx_buffers[*entry].skb = new_skb;
6876                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6877                                            mapping, new_addr);
6878
6879                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6880                                             new_skb->len, base_flags,
6881                                             mss, vlan)) {
6882                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6883                                 dev_kfree_skb(new_skb);
6884                                 ret = -1;
6885                         }
6886                 }
6887         }
6888
6889         dev_kfree_skb(skb);
6890         *pskb = new_skb;
6891         return ret;
6892 }
6893
6894 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6895
6896 /* Use GSO to work around a rare TSO bug that may be triggered when the
6897  * TSO header is greater than 80 bytes.
6898  */
6899 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6900 {
6901         struct sk_buff *segs, *nskb;
6902         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6903
6904         /* Estimate the number of fragments in the worst case */
6905         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6906                 netif_stop_queue(tp->dev);
6907
6908                 /* netif_tx_stop_queue() must be done before checking
6909                  * tx index in tg3_tx_avail() below, because in
6910                  * tg3_tx(), we update tx index before checking for
6911                  * netif_tx_queue_stopped().
6912                  */
6913                 smp_mb();
6914                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6915                         return NETDEV_TX_BUSY;
6916
6917                 netif_wake_queue(tp->dev);
6918         }
6919
6920         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6921         if (IS_ERR(segs))
6922                 goto tg3_tso_bug_end;
6923
6924         do {
6925                 nskb = segs;
6926                 segs = segs->next;
6927                 nskb->next = NULL;
6928                 tg3_start_xmit(nskb, tp->dev);
6929         } while (segs);
6930
6931 tg3_tso_bug_end:
6932         dev_kfree_skb(skb);
6933
6934         return NETDEV_TX_OK;
6935 }
6936
6937 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6938  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6939  */
6940 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6941 {
6942         struct tg3 *tp = netdev_priv(dev);
6943         u32 len, entry, base_flags, mss, vlan = 0;
6944         u32 budget;
6945         int i = -1, would_hit_hwbug;
6946         dma_addr_t mapping;
6947         struct tg3_napi *tnapi;
6948         struct netdev_queue *txq;
6949         unsigned int last;
6950
6951         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6952         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6953         if (tg3_flag(tp, ENABLE_TSS))
6954                 tnapi++;
6955
6956         budget = tg3_tx_avail(tnapi);
6957
6958         /* We are running in BH disabled context with netif_tx_lock
6959          * and TX reclaim runs via tp->napi.poll inside of a software
6960          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6961          * no IRQ context deadlocks to worry about either.  Rejoice!
6962          */
6963         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6964                 if (!netif_tx_queue_stopped(txq)) {
6965                         netif_tx_stop_queue(txq);
6966
6967                         /* This is a hard error, log it. */
6968                         netdev_err(dev,
6969                                    "BUG! Tx Ring full when queue awake!\n");
6970                 }
6971                 return NETDEV_TX_BUSY;
6972         }
6973
6974         entry = tnapi->tx_prod;
6975         base_flags = 0;
6976         if (skb->ip_summed == CHECKSUM_PARTIAL)
6977                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6978
6979         mss = skb_shinfo(skb)->gso_size;
6980         if (mss) {
6981                 struct iphdr *iph;
6982                 u32 tcp_opt_len, hdr_len;
6983
6984                 if (skb_header_cloned(skb) &&
6985                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6986                         goto drop;
6987
6988                 iph = ip_hdr(skb);
6989                 tcp_opt_len = tcp_optlen(skb);
6990
6991                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6992
6993                 if (!skb_is_gso_v6(skb)) {
6994                         iph->check = 0;
6995                         iph->tot_len = htons(mss + hdr_len);
6996                 }
6997
6998                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6999                     tg3_flag(tp, TSO_BUG))
7000                         return tg3_tso_bug(tp, skb);
7001
7002                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7003                                TXD_FLAG_CPU_POST_DMA);
7004
7005                 if (tg3_flag(tp, HW_TSO_1) ||
7006                     tg3_flag(tp, HW_TSO_2) ||
7007                     tg3_flag(tp, HW_TSO_3)) {
7008                         tcp_hdr(skb)->check = 0;
7009                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7010                 } else
7011                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7012                                                                  iph->daddr, 0,
7013                                                                  IPPROTO_TCP,
7014                                                                  0);
7015
7016                 if (tg3_flag(tp, HW_TSO_3)) {
7017                         mss |= (hdr_len & 0xc) << 12;
7018                         if (hdr_len & 0x10)
7019                                 base_flags |= 0x00000010;
7020                         base_flags |= (hdr_len & 0x3e0) << 5;
7021                 } else if (tg3_flag(tp, HW_TSO_2))
7022                         mss |= hdr_len << 9;
7023                 else if (tg3_flag(tp, HW_TSO_1) ||
7024                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7025                         if (tcp_opt_len || iph->ihl > 5) {
7026                                 int tsflags;
7027
7028                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7029                                 mss |= (tsflags << 11);
7030                         }
7031                 } else {
7032                         if (tcp_opt_len || iph->ihl > 5) {
7033                                 int tsflags;
7034
7035                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7036                                 base_flags |= tsflags << 12;
7037                         }
7038                 }
7039         }
7040
7041         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7042             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7043                 base_flags |= TXD_FLAG_JMB_PKT;
7044
7045         if (vlan_tx_tag_present(skb)) {
7046                 base_flags |= TXD_FLAG_VLAN;
7047                 vlan = vlan_tx_tag_get(skb);
7048         }
7049
7050         len = skb_headlen(skb);
7051
7052         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7053         if (pci_dma_mapping_error(tp->pdev, mapping))
7054                 goto drop;
7055
7057         tnapi->tx_buffers[entry].skb = skb;
7058         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7059
7060         would_hit_hwbug = 0;
7061
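        /* 5701 revisions with the DMA bug must always go through the
         * hwbug workaround path below.
         */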
7062         if (tg3_flag(tp, 5701_DMA_BUG))
7063                 would_hit_hwbug = 1;
7064
7065         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7066                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7067                             mss, vlan)) {
7068                 would_hit_hwbug = 1;
7069         } else if (skb_shinfo(skb)->nr_frags > 0) {
7070                 u32 tmp_mss = mss;
7071
7072                 if (!tg3_flag(tp, HW_TSO_1) &&
7073                     !tg3_flag(tp, HW_TSO_2) &&
7074                     !tg3_flag(tp, HW_TSO_3))
7075                         tmp_mss = 0;
7076
7077                 /* Now loop through additional data
7078                  * fragments, and queue them.
7079                  */
7080                 last = skb_shinfo(skb)->nr_frags - 1;
7081                 for (i = 0; i <= last; i++) {
7082                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7083
7084                         len = skb_frag_size(frag);
7085                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7086                                                    len, DMA_TO_DEVICE);
7087
7088                         tnapi->tx_buffers[entry].skb = NULL;
7089                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7090                                            mapping);
7091                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7092                                 goto dma_error;
7093
7094                         if (!budget ||
7095                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7096                                             len, base_flags |
7097                                             ((i == last) ? TXD_FLAG_END : 0),
7098                                             tmp_mss, vlan)) {
7099                                 would_hit_hwbug = 1;
7100                                 break;
7101                         }
7102                 }
7103         }
7104
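        /* One of the descriptors tripped a hardware limitation.
         * Unmap everything queued so far and retransmit the packet
         * through a freshly allocated, linearized copy.
         */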
7105         if (would_hit_hwbug) {
7106                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7107
7108                 /* If the workaround fails due to memory/mapping
7109                  * failure, silently drop this packet.
7110                  */
7111                 entry = tnapi->tx_prod;
7112                 budget = tg3_tx_avail(tnapi);
7113                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7114                                                 base_flags, mss, vlan))
7115                         goto drop_nofree;
7116         }
7117
7118         skb_tx_timestamp(skb);
7119         netdev_tx_sent_queue(txq, skb->len);
7120
7121         /* Sync BD data before updating mailbox */
7122         wmb();
7123
7124         /* Packets are ready, update Tx producer idx local and on card. */
7125         tw32_tx_mbox(tnapi->prodmbox, entry);
7126
7127         tnapi->tx_prod = entry;
7128         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7129                 netif_tx_stop_queue(txq);
7130
7131                 /* netif_tx_stop_queue() must be done before checking
7132                  * tx index in tg3_tx_avail() below, because in
7133                  * tg3_tx(), we update tx index before checking for
7134                  * netif_tx_queue_stopped().
7135                  */
7136                 smp_mb();
7137                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7138                         netif_tx_wake_queue(txq);
7139         }
7140
7141         mmiowb();
7142         return NETDEV_TX_OK;
7143
7144 dma_error:
7145         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7146         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7147 drop:
7148         dev_kfree_skb(skb);
7149 drop_nofree:
7150         tp->tx_dropped++;
7151         return NETDEV_TX_OK;
7152 }
7153
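/* Enable or disable internal MAC loopback.  With loopback on, frames
 * are looped back inside the MAC and the port mode is forced to MII
 * or GMII depending on the PHY's capabilities.
 */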
7154 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7155 {
7156         if (enable) {
7157                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7158                                   MAC_MODE_PORT_MODE_MASK);
7159
7160                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7161
7162                 if (!tg3_flag(tp, 5705_PLUS))
7163                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7164
7165                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7166                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7167                 else
7168                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7169         } else {
7170                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7171
7172                 if (tg3_flag(tp, 5705_PLUS) ||
7173                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7174                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7175                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7176         }
7177
7178         tw32(MAC_MODE, tp->mac_mode);
7179         udelay(40);
7180 }
7181
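/* Configure the PHY for the loopback self test: force full duplex at
 * the requested speed and select either internal (BMCR) loopback or
 * external loopback when extlpbk is set.
 */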
7182 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7183 {
7184         u32 val, bmcr, mac_mode, ptest = 0;
7185
7186         tg3_phy_toggle_apd(tp, false);
7187         tg3_phy_toggle_automdix(tp, 0);
7188
7189         if (extlpbk && tg3_phy_set_extloopbk(tp))
7190                 return -EIO;
7191
7192         bmcr = BMCR_FULLDPLX;
7193         switch (speed) {
7194         case SPEED_10:
7195                 break;
7196         case SPEED_100:
7197                 bmcr |= BMCR_SPEED100;
7198                 break;
7199         case SPEED_1000:
7200         default:
7201                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7202                         speed = SPEED_100;
7203                         bmcr |= BMCR_SPEED100;
7204                 } else {
7205                         speed = SPEED_1000;
7206                         bmcr |= BMCR_SPEED1000;
7207                 }
7208         }
7209
7210         if (extlpbk) {
7211                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7212                         tg3_readphy(tp, MII_CTRL1000, &val);
7213                         val |= CTL1000_AS_MASTER |
7214                                CTL1000_ENABLE_MASTER;
7215                         tg3_writephy(tp, MII_CTRL1000, val);
7216                 } else {
7217                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7218                                 MII_TG3_FET_PTEST_TRIM_2;
7219                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7220                 }
7221         } else
7222                 bmcr |= BMCR_LOOPBACK;
7223
7224         tg3_writephy(tp, MII_BMCR, bmcr);
7225
7226         /* The write needs to be flushed for the FETs */
7227         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7228                 tg3_readphy(tp, MII_BMCR, &bmcr);
7229
7230         udelay(40);
7231
7232         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7233             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7234                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7235                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7236                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7237
7238                 /* The write needs to be flushed for the AC131 */
7239                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7240         }
7241
7242         /* Reset to prevent losing 1st rx packet intermittently */
7243         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7244             tg3_flag(tp, 5780_CLASS)) {
7245                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7246                 udelay(10);
7247                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7248         }
7249
7250         mac_mode = tp->mac_mode &
7251                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7252         if (speed == SPEED_1000)
7253                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7254         else
7255                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7256
7257         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7258                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7259
7260                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7261                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7262                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7263                         mac_mode |= MAC_MODE_LINK_POLARITY;
7264
7265                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7266                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7267         }
7268
7269         tw32(MAC_MODE, mac_mode);
7270         udelay(40);
7271
7272         return 0;
7273 }
7274
7275 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7276 {
7277         struct tg3 *tp = netdev_priv(dev);
7278
7279         if (features & NETIF_F_LOOPBACK) {
7280                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7281                         return;
7282
7283                 spin_lock_bh(&tp->lock);
7284                 tg3_mac_loopback(tp, true);
7285                 netif_carrier_on(tp->dev);
7286                 spin_unlock_bh(&tp->lock);
7287                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7288         } else {
7289                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7290                         return;
7291
7292                 spin_lock_bh(&tp->lock);
7293                 tg3_mac_loopback(tp, false);
7294                 /* Force link status check */
7295                 tg3_setup_phy(tp, 1);
7296                 spin_unlock_bh(&tp->lock);
7297                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7298         }
7299 }
7300
7301 static netdev_features_t tg3_fix_features(struct net_device *dev,
7302         netdev_features_t features)
7303 {
7304         struct tg3 *tp = netdev_priv(dev);
7305
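        /* 5780-class chips cannot handle TSO on jumbo frames, so all
         * TSO feature bits are cleared once the MTU exceeds the
         * standard Ethernet payload size.
         */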
7306         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7307                 features &= ~NETIF_F_ALL_TSO;
7308
7309         return features;
7310 }
7311
7312 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7313 {
7314         netdev_features_t changed = dev->features ^ features;
7315
7316         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7317                 tg3_set_loopback(dev, features);
7318
7319         return 0;
7320 }
7321
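/* Release the rx buffers held by a producer ring.  Per-vector rings
 * only carry buffers between their consumer and producer indices,
 * while the real hardware prodring on vector 0 may hold a buffer in
 * every slot.
 */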
7322 static void tg3_rx_prodring_free(struct tg3 *tp,
7323                                  struct tg3_rx_prodring_set *tpr)
7324 {
7325         int i;
7326
7327         if (tpr != &tp->napi[0].prodring) {
7328                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7329                      i = (i + 1) & tp->rx_std_ring_mask)
7330                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7331                                         tp->rx_pkt_map_sz);
7332
7333                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7334                         for (i = tpr->rx_jmb_cons_idx;
7335                              i != tpr->rx_jmb_prod_idx;
7336                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7337                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7338                                                 TG3_RX_JMB_MAP_SZ);
7339                         }
7340                 }
7341
7342                 return;
7343         }
7344
7345         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7346                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7347                                 tp->rx_pkt_map_sz);
7348
7349         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7350                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7351                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7352                                         TG3_RX_JMB_MAP_SZ);
7353         }
7354 }
7355
7356 /* Initialize rx rings for packet processing.
7357  *
7358  * The chip has been shut down and the driver detached from
7359  * the network stack, so no interrupts or new tx packets will
7360  * end up in the driver.  tp->{tx,}lock are held and thus
7361  * we may not sleep.
7362  */
7363 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7364                                  struct tg3_rx_prodring_set *tpr)
7365 {
7366         u32 i, rx_pkt_dma_sz;
7367
7368         tpr->rx_std_cons_idx = 0;
7369         tpr->rx_std_prod_idx = 0;
7370         tpr->rx_jmb_cons_idx = 0;
7371         tpr->rx_jmb_prod_idx = 0;
7372
7373         if (tpr != &tp->napi[0].prodring) {
7374                 memset(&tpr->rx_std_buffers[0], 0,
7375                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7376                 if (tpr->rx_jmb_buffers)
7377                         memset(&tpr->rx_jmb_buffers[0], 0,
7378                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7379                 goto done;
7380         }
7381
7382         /* Zero out all descriptors. */
7383         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7384
7385         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7386         if (tg3_flag(tp, 5780_CLASS) &&
7387             tp->dev->mtu > ETH_DATA_LEN)
7388                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7389         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7390
7391         /* Initialize invariants of the rings; we set this up
7392          * only once.  This works because the card does not
7393          * write into the rx buffer posting rings.
7394          */
7395         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7396                 struct tg3_rx_buffer_desc *rxd;
7397
7398                 rxd = &tpr->rx_std[i];
7399                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7400                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7401                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7402                                (i << RXD_OPAQUE_INDEX_SHIFT));
7403         }
7404
7405         /* Now allocate fresh rx data buffers for the standard ring. */
7406         for (i = 0; i < tp->rx_pending; i++) {
7407                 unsigned int frag_size;
7408
7409                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7410                                       &frag_size) < 0) {
7411                         netdev_warn(tp->dev,
7412                                     "Using a smaller RX standard ring. Only "
7413                                     "%d out of %d buffers were allocated "
7414                                     "successfully\n", i, tp->rx_pending);
7415                         if (i == 0)
7416                                 goto initfail;
7417                         tp->rx_pending = i;
7418                         break;
7419                 }
7420         }
7421
7422         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7423                 goto done;
7424
7425         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7426
7427         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7428                 goto done;
7429
7430         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7431                 struct tg3_rx_buffer_desc *rxd;
7432
7433                 rxd = &tpr->rx_jmb[i].std;
7434                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7435                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7436                                   RXD_FLAG_JUMBO;
7437                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7438                        (i << RXD_OPAQUE_INDEX_SHIFT));
7439         }
7440
7441         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7442                 unsigned int frag_size;
7443
7444                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7445                                       &frag_size) < 0) {
7446                         netdev_warn(tp->dev,
7447                                     "Using a smaller RX jumbo ring. Only %d "
7448                                     "out of %d buffers were allocated "
7449                                     "successfully\n", i, tp->rx_jumbo_pending);
7450                         if (i == 0)
7451                                 goto initfail;
7452                         tp->rx_jumbo_pending = i;
7453                         break;
7454                 }
7455         }
7456
7457 done:
7458         return 0;
7459
7460 initfail:
7461         tg3_rx_prodring_free(tp, tpr);
7462         return -ENOMEM;
7463 }
7464
7465 static void tg3_rx_prodring_fini(struct tg3 *tp,
7466                                  struct tg3_rx_prodring_set *tpr)
7467 {
7468         kfree(tpr->rx_std_buffers);
7469         tpr->rx_std_buffers = NULL;
7470         kfree(tpr->rx_jmb_buffers);
7471         tpr->rx_jmb_buffers = NULL;
7472         if (tpr->rx_std) {
7473                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7474                                   tpr->rx_std, tpr->rx_std_mapping);
7475                 tpr->rx_std = NULL;
7476         }
7477         if (tpr->rx_jmb) {
7478                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7479                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7480                 tpr->rx_jmb = NULL;
7481         }
7482 }
7483
7484 static int tg3_rx_prodring_init(struct tg3 *tp,
7485                                 struct tg3_rx_prodring_set *tpr)
7486 {
7487         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7488                                       GFP_KERNEL);
7489         if (!tpr->rx_std_buffers)
7490                 return -ENOMEM;
7491
7492         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7493                                          TG3_RX_STD_RING_BYTES(tp),
7494                                          &tpr->rx_std_mapping,
7495                                          GFP_KERNEL);
7496         if (!tpr->rx_std)
7497                 goto err_out;
7498
7499         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7500                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7501                                               GFP_KERNEL);
7502                 if (!tpr->rx_jmb_buffers)
7503                         goto err_out;
7504
7505                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7506                                                  TG3_RX_JMB_RING_BYTES(tp),
7507                                                  &tpr->rx_jmb_mapping,
7508                                                  GFP_KERNEL);
7509                 if (!tpr->rx_jmb)
7510                         goto err_out;
7511         }
7512
7513         return 0;
7514
7515 err_out:
7516         tg3_rx_prodring_fini(tp, tpr);
7517         return -ENOMEM;
7518 }
7519
7520 /* Free up pending packets in all rx/tx rings.
7521  *
7522  * The chip has been shut down and the driver detached from
7523  * the network stack, so no interrupts or new tx packets will
7524  * end up in the driver.  tp->{tx,}lock is not held and we are not
7525  * in an interrupt context and thus may sleep.
7526  */
7527 static void tg3_free_rings(struct tg3 *tp)
7528 {
7529         int i, j;
7530
7531         for (j = 0; j < tp->irq_cnt; j++) {
7532                 struct tg3_napi *tnapi = &tp->napi[j];
7533
7534                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7535
7536                 if (!tnapi->tx_buffers)
7537                         continue;
7538
7539                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7540                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7541
7542                         if (!skb)
7543                                 continue;
7544
7545                         tg3_tx_skb_unmap(tnapi, i,
7546                                          skb_shinfo(skb)->nr_frags - 1);
7547
7548                         dev_kfree_skb_any(skb);
7549                 }
7550                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7551         }
7552 }
7553
7554 /* Initialize tx/rx rings for packet processing.
7555  *
7556  * The chip has been shut down and the driver detached from
7557  * the network stack, so no interrupts or new tx packets will
7558  * end up in the driver.  tp->{tx,}lock are held and thus
7559  * we may not sleep.
7560  */
7561 static int tg3_init_rings(struct tg3 *tp)
7562 {
7563         int i;
7564
7565         /* Free up all the SKBs. */
7566         tg3_free_rings(tp);
7567
7568         for (i = 0; i < tp->irq_cnt; i++) {
7569                 struct tg3_napi *tnapi = &tp->napi[i];
7570
7571                 tnapi->last_tag = 0;
7572                 tnapi->last_irq_tag = 0;
7573                 tnapi->hw_status->status = 0;
7574                 tnapi->hw_status->status_tag = 0;
7575                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7576
7577                 tnapi->tx_prod = 0;
7578                 tnapi->tx_cons = 0;
7579                 if (tnapi->tx_ring)
7580                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7581
7582                 tnapi->rx_rcb_ptr = 0;
7583                 if (tnapi->rx_rcb)
7584                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7585
7586                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7587                         tg3_free_rings(tp);
7588                         return -ENOMEM;
7589                 }
7590         }
7591
7592         return 0;
7593 }
7594
7595 static void tg3_mem_tx_release(struct tg3 *tp)
7596 {
7597         int i;
7598
7599         for (i = 0; i < tp->irq_max; i++) {
7600                 struct tg3_napi *tnapi = &tp->napi[i];
7601
7602                 if (tnapi->tx_ring) {
7603                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7604                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7605                         tnapi->tx_ring = NULL;
7606                 }
7607
7608                 kfree(tnapi->tx_buffers);
7609                 tnapi->tx_buffers = NULL;
7610         }
7611 }
7612
7613 static int tg3_mem_tx_acquire(struct tg3 *tp)
7614 {
7615         int i;
7616         struct tg3_napi *tnapi = &tp->napi[0];
7617
7618         /* If multivector TSS is enabled, vector 0 does not handle
7619          * tx interrupts.  Don't allocate any resources for it.
7620          */
7621         if (tg3_flag(tp, ENABLE_TSS))
7622                 tnapi++;
7623
7624         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7625                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7626                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7627                 if (!tnapi->tx_buffers)
7628                         goto err_out;
7629
7630                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7631                                                     TG3_TX_RING_BYTES,
7632                                                     &tnapi->tx_desc_mapping,
7633                                                     GFP_KERNEL);
7634                 if (!tnapi->tx_ring)
7635                         goto err_out;
7636         }
7637
7638         return 0;
7639
7640 err_out:
7641         tg3_mem_tx_release(tp);
7642         return -ENOMEM;
7643 }
7644
7645 static void tg3_mem_rx_release(struct tg3 *tp)
7646 {
7647         int i;
7648
7649         for (i = 0; i < tp->irq_max; i++) {
7650                 struct tg3_napi *tnapi = &tp->napi[i];
7651
7652                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7653
7654                 if (!tnapi->rx_rcb)
7655                         continue;
7656
7657                 dma_free_coherent(&tp->pdev->dev,
7658                                   TG3_RX_RCB_RING_BYTES(tp),
7659                                   tnapi->rx_rcb,
7660                                   tnapi->rx_rcb_mapping);
7661                 tnapi->rx_rcb = NULL;
7662         }
7663 }
7664
7665 static int tg3_mem_rx_acquire(struct tg3 *tp)
7666 {
7667         unsigned int i, limit;
7668
7669         limit = tp->rxq_cnt;
7670
7671         /* If RSS is enabled, we need a (dummy) producer ring
7672          * set on vector zero.  This is the true hw prodring.
7673          */
7674         if (tg3_flag(tp, ENABLE_RSS))
7675                 limit++;
7676
7677         for (i = 0; i < limit; i++) {
7678                 struct tg3_napi *tnapi = &tp->napi[i];
7679
7680                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7681                         goto err_out;
7682
7683                 /* If multivector RSS is enabled, vector 0
7684                  * does not handle rx or tx interrupts.
7685                  * Don't allocate any resources for it.
7686                  */
7687                 if (!i && tg3_flag(tp, ENABLE_RSS))
7688                         continue;
7689
7690                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7691                                                    TG3_RX_RCB_RING_BYTES(tp),
7692                                                    &tnapi->rx_rcb_mapping,
7693                                                    GFP_KERNEL);
7694                 if (!tnapi->rx_rcb)
7695                         goto err_out;
7696
7697                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7698         }
7699
7700         return 0;
7701
7702 err_out:
7703         tg3_mem_rx_release(tp);
7704         return -ENOMEM;
7705 }
7706
7707 /*
7708  * Must not be invoked with interrupt sources disabled and
7709  * the hardware shut down.
7710  */
7711 static void tg3_free_consistent(struct tg3 *tp)
7712 {
7713         int i;
7714
7715         for (i = 0; i < tp->irq_cnt; i++) {
7716                 struct tg3_napi *tnapi = &tp->napi[i];
7717
7718                 if (tnapi->hw_status) {
7719                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7720                                           tnapi->hw_status,
7721                                           tnapi->status_mapping);
7722                         tnapi->hw_status = NULL;
7723                 }
7724         }
7725
7726         tg3_mem_rx_release(tp);
7727         tg3_mem_tx_release(tp);
7728
7729         if (tp->hw_stats) {
7730                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7731                                   tp->hw_stats, tp->stats_mapping);
7732                 tp->hw_stats = NULL;
7733         }
7734 }
7735
7736 /*
7737  * Must not be invoked with interrupt sources disabled and
7738  * the hardware shut down.  Can sleep.
7739  */
7740 static int tg3_alloc_consistent(struct tg3 *tp)
7741 {
7742         int i;
7743
7744         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7745                                           sizeof(struct tg3_hw_stats),
7746                                           &tp->stats_mapping,
7747                                           GFP_KERNEL);
7748         if (!tp->hw_stats)
7749                 goto err_out;
7750
7751         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7752
7753         for (i = 0; i < tp->irq_cnt; i++) {
7754                 struct tg3_napi *tnapi = &tp->napi[i];
7755                 struct tg3_hw_status *sblk;
7756
7757                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7758                                                       TG3_HW_STATUS_SIZE,
7759                                                       &tnapi->status_mapping,
7760                                                       GFP_KERNEL);
7761                 if (!tnapi->hw_status)
7762                         goto err_out;
7763
7764                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7765                 sblk = tnapi->hw_status;
7766
7767                 if (tg3_flag(tp, ENABLE_RSS)) {
7768                         u16 *prodptr = NULL;
7769
7770                         /*
7771                          * When RSS is enabled, the status block format changes
7772                          * slightly.  The "rx_jumbo_consumer", "reserved",
7773                          * and "rx_mini_consumer" members get mapped to the
7774                          * other three rx return ring producer indexes.
7775                          */
7776                         switch (i) {
7777                         case 1:
7778                                 prodptr = &sblk->idx[0].rx_producer;
7779                                 break;
7780                         case 2:
7781                                 prodptr = &sblk->rx_jumbo_consumer;
7782                                 break;
7783                         case 3:
7784                                 prodptr = &sblk->reserved;
7785                                 break;
7786                         case 4:
7787                                 prodptr = &sblk->rx_mini_consumer;
7788                                 break;
7789                         }
7790                         tnapi->rx_rcb_prod_idx = prodptr;
7791                 } else {
7792                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7793                 }
7794         }
7795
7796         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
7797                 goto err_out;
7798
7799         return 0;
7800
7801 err_out:
7802         tg3_free_consistent(tp);
7803         return -ENOMEM;
7804 }
7805
7806 #define MAX_WAIT_CNT 1000
7807
7808 /* To stop a block, clear the enable bit and poll till it
7809  * clears.  tp->lock is held.
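 * Each poll iteration waits 100us, so the total timeout comes to
 * MAX_WAIT_CNT * 100us = 100ms.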
7810  */
7811 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7812 {
7813         unsigned int i;
7814         u32 val;
7815
7816         if (tg3_flag(tp, 5705_PLUS)) {
7817                 switch (ofs) {
7818                 case RCVLSC_MODE:
7819                 case DMAC_MODE:
7820                 case MBFREE_MODE:
7821                 case BUFMGR_MODE:
7822                 case MEMARB_MODE:
7823                         /* We can't enable/disable these bits of the
7824                          * 5705/5750, just say success.
7825                          */
7826                         return 0;
7827
7828                 default:
7829                         break;
7830                 }
7831         }
7832
7833         val = tr32(ofs);
7834         val &= ~enable_bit;
7835         tw32_f(ofs, val);
7836
7837         for (i = 0; i < MAX_WAIT_CNT; i++) {
7838                 udelay(100);
7839                 val = tr32(ofs);
7840                 if ((val & enable_bit) == 0)
7841                         break;
7842         }
7843
7844         if (i == MAX_WAIT_CNT && !silent) {
7845                 dev_err(&tp->pdev->dev,
7846                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7847                         ofs, enable_bit);
7848                 return -ENODEV;
7849         }
7850
7851         return 0;
7852 }
7853
7854 /* tp->lock is held. */
7855 static int tg3_abort_hw(struct tg3 *tp, int silent)
7856 {
7857         int i, err;
7858
7859         tg3_disable_ints(tp);
7860
7861         tp->rx_mode &= ~RX_MODE_ENABLE;
7862         tw32_f(MAC_RX_MODE, tp->rx_mode);
7863         udelay(10);
7864
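        /* Stop the receive blocks first and the transmit blocks after,
         * so a stopped block is never fed new work by an earlier stage
         * of the pipeline.
         */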
7865         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7866         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7867         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7868         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7869         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7870         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7871
7872         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7873         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7874         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7875         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7876         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7877         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7878         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7879
7880         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7881         tw32_f(MAC_MODE, tp->mac_mode);
7882         udelay(40);
7883
7884         tp->tx_mode &= ~TX_MODE_ENABLE;
7885         tw32_f(MAC_TX_MODE, tp->tx_mode);
7886
7887         for (i = 0; i < MAX_WAIT_CNT; i++) {
7888                 udelay(100);
7889                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7890                         break;
7891         }
7892         if (i >= MAX_WAIT_CNT) {
7893                 dev_err(&tp->pdev->dev,
7894                         "%s timed out, TX_MODE_ENABLE will not clear "
7895                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7896                 err |= -ENODEV;
7897         }
7898
7899         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7900         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7901         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7902
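        /* Pulse every FTQ (flow-through queue) reset bit high and back
         * low to flush the hardware queues.
         */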
7903         tw32(FTQ_RESET, 0xffffffff);
7904         tw32(FTQ_RESET, 0x00000000);
7905
7906         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7907         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7908
7909         for (i = 0; i < tp->irq_cnt; i++) {
7910                 struct tg3_napi *tnapi = &tp->napi[i];
7911                 if (tnapi->hw_status)
7912                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7913         }
7914
7915         return err;
7916 }
7917
7918 /* Save PCI command register before chip reset */
7919 static void tg3_save_pci_state(struct tg3 *tp)
7920 {
7921         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7922 }
7923
7924 /* Restore PCI state after chip reset */
7925 static void tg3_restore_pci_state(struct tg3 *tp)
7926 {
7927         u32 val;
7928
7929         /* Re-enable indirect register accesses. */
7930         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7931                                tp->misc_host_ctrl);
7932
7933         /* Set MAX PCI retry to zero. */
7934         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7935         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7936             tg3_flag(tp, PCIX_MODE))
7937                 val |= PCISTATE_RETRY_SAME_DMA;
7938         /* Allow reads and writes to the APE register and memory space. */
7939         if (tg3_flag(tp, ENABLE_APE))
7940                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7941                        PCISTATE_ALLOW_APE_SHMEM_WR |
7942                        PCISTATE_ALLOW_APE_PSPACE_WR;
7943         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7944
7945         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7946
7947         if (!tg3_flag(tp, PCI_EXPRESS)) {
7948                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7949                                       tp->pci_cacheline_sz);
7950                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7951                                       tp->pci_lat_timer);
7952         }
7953
7954         /* Make sure PCI-X relaxed ordering bit is clear. */
7955         if (tg3_flag(tp, PCIX_MODE)) {
7956                 u16 pcix_cmd;
7957
7958                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7959                                      &pcix_cmd);
7960                 pcix_cmd &= ~PCI_X_CMD_ERO;
7961                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7962                                       pcix_cmd);
7963         }
7964
7965         if (tg3_flag(tp, 5780_CLASS)) {
7967                 /* Chip reset on 5780 will reset MSI enable bit,
7968                  * so need to restore it.
7969                  */
7970                 if (tg3_flag(tp, USING_MSI)) {
7971                         u16 ctrl;
7972
7973                         pci_read_config_word(tp->pdev,
7974                                              tp->msi_cap + PCI_MSI_FLAGS,
7975                                              &ctrl);
7976                         pci_write_config_word(tp->pdev,
7977                                               tp->msi_cap + PCI_MSI_FLAGS,
7978                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7979                         val = tr32(MSGINT_MODE);
7980                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7981                 }
7982         }
7983 }
7984
7985 /* tp->lock is held. */
7986 static int tg3_chip_reset(struct tg3 *tp)
7987 {
7988         u32 val;
7989         void (*write_op)(struct tg3 *, u32, u32);
7990         int i, err;
7991
7992         tg3_nvram_lock(tp);
7993
7994         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7995
7996         /* No matching tg3_nvram_unlock() after this because
7997          * chip reset below will undo the nvram lock.
7998          */
7999         tp->nvram_lock_cnt = 0;
8000
8001         /* GRC_MISC_CFG core clock reset will clear the memory
8002          * enable bit in PCI register 4 and the MSI enable bit
8003          * on some chips, so we save relevant registers here.
8004          */
8005         tg3_save_pci_state(tp);
8006
8007         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8008             tg3_flag(tp, 5755_PLUS))
8009                 tw32(GRC_FASTBOOT_PC, 0);
8010
8011         /*
8012          * We must avoid the readl() that normally takes place.
8013          * It locks machines, causes machine checks, and other
8014          * fun things.  So, temporarily disable the 5701
8015          * hardware workaround, while we do the reset.
8016          */
8017         write_op = tp->write32;
8018         if (write_op == tg3_write_flush_reg32)
8019                 tp->write32 = tg3_write32;
8020
8021         /* Prevent the irq handler from reading or writing PCI registers
8022          * during chip reset when the memory enable bit in the PCI command
8023          * register may be cleared.  The chip does not generate interrupt
8024          * at this time, but the irq handler may still be called due to irq
8025          * sharing or irqpoll.
8026          */
8027         tg3_flag_set(tp, CHIP_RESETTING);
8028         for (i = 0; i < tp->irq_cnt; i++) {
8029                 struct tg3_napi *tnapi = &tp->napi[i];
8030                 if (tnapi->hw_status) {
8031                         tnapi->hw_status->status = 0;
8032                         tnapi->hw_status->status_tag = 0;
8033                 }
8034                 tnapi->last_tag = 0;
8035                 tnapi->last_irq_tag = 0;
8036         }
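        /* Ensure the flag and status block updates above are visible
         * to all CPUs before waiting for in-flight irq handlers to
         * complete.
         */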
8037         smp_mb();
8038
8039         for (i = 0; i < tp->irq_cnt; i++)
8040                 synchronize_irq(tp->napi[i].irq_vec);
8041
8042         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8043                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8044                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8045         }
8046
8047         /* do the reset */
8048         val = GRC_MISC_CFG_CORECLK_RESET;
8049
8050         if (tg3_flag(tp, PCI_EXPRESS)) {
8051                 /* Force PCIe 1.0a mode */
8052                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8053                     !tg3_flag(tp, 57765_PLUS) &&
8054                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8055                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8056                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8057
8058                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8059                         tw32(GRC_MISC_CFG, (1 << 29));
8060                         val |= (1 << 29);
8061                 }
8062         }
8063
8064         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8065                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8066                 tw32(GRC_VCPU_EXT_CTRL,
8067                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8068         }
8069
8070         /* Manage gphy power for all PCIe devices lacking a CPMU. */
8071         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8072                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8073
8074         tw32(GRC_MISC_CFG, val);
8075
8076         /* restore 5701 hardware bug workaround write method */
8077         tp->write32 = write_op;
8078
8079         /* Unfortunately, we have to delay before the PCI read back.
8080          * Some 575X chips will not even respond to a PCI cfg access
8081          * when the reset command is given to the chip.
8082          *
8083          * How do these hardware designers expect things to work
8084          * properly if the PCI write is posted for a long period
8085          * of time?  It is always necessary to have some method by
8086          * which a register read back can occur to push the write
8087          * out which does the reset.
8088          *
8089          * For most tg3 variants the trick below was working.
8090          * Ho hum...
8091          */
8092         udelay(120);
8093
8094         /* Flush PCI posted writes.  The normal MMIO registers
8095          * are inaccessible at this time so this is the only
8096          * way to do this reliably (actually, this is no longer
8097          * the case, see above).  I tried to use indirect
8098          * register read/write but this upset some 5701 variants.
8099          */
8100         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8101
8102         udelay(120);
8103
8104         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8105                 u16 val16;
8106
8107                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8108                         int j;
8109                         u32 cfg_val;
8110
8111                         /* Wait for link training to complete.  */
8112                         for (j = 0; j < 5000; j++)
8113                                 udelay(100);
8114
8115                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8116                         pci_write_config_dword(tp->pdev, 0xc4,
8117                                                cfg_val | (1 << 15));
8118                 }
8119
8120                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8121                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8122                 /*
8123                  * Older PCIe devices only support the 128 byte
8124                  * MPS setting.  Enforce the restriction.
8125                  */
8126                 if (!tg3_flag(tp, CPMU_PRESENT))
8127                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8128                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8129
8130                 /* Clear error status */
8131                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8132                                       PCI_EXP_DEVSTA_CED |
8133                                       PCI_EXP_DEVSTA_NFED |
8134                                       PCI_EXP_DEVSTA_FED |
8135                                       PCI_EXP_DEVSTA_URD);
8136         }
8137
8138         tg3_restore_pci_state(tp);
8139
8140         tg3_flag_clear(tp, CHIP_RESETTING);
8141         tg3_flag_clear(tp, ERROR_PROCESSED);
8142
8143         val = 0;
8144         if (tg3_flag(tp, 5780_CLASS))
8145                 val = tr32(MEMARB_MODE);
8146         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8147
8148         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8149                 tg3_stop_fw(tp);
8150                 tw32(0x5000, 0x400);
8151         }
8152
8153         tw32(GRC_MODE, tp->grc_mode);
8154
8155         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8156                 val = tr32(0xc4);
8157
8158                 tw32(0xc4, val | (1 << 15));
8159         }
8160
8161         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8162             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8163                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8164                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8165                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8166                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8167         }
8168
8169         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8170                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8171                 val = tp->mac_mode;
8172         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8173                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8174                 val = tp->mac_mode;
8175         } else
8176                 val = 0;
8177
8178         tw32_f(MAC_MODE, val);
8179         udelay(40);
8180
8181         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8182
8183         err = tg3_poll_fw(tp);
8184         if (err)
8185                 return err;
8186
8187         tg3_mdio_start(tp);
8188
8189         if (tg3_flag(tp, PCI_EXPRESS) &&
8190             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8191             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8192             !tg3_flag(tp, 57765_PLUS)) {
8193                 val = tr32(0x7c00);
8194
8195                 tw32(0x7c00, val | (1 << 25));
8196         }
8197
8198         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8199                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8200                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8201         }
8202
8203         /* Reprobe ASF enable state.  */
8204         tg3_flag_clear(tp, ENABLE_ASF);
8205         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8206         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8207         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8208                 u32 nic_cfg;
8209
8210                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8211                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8212                         tg3_flag_set(tp, ENABLE_ASF);
8213                         tp->last_event_jiffies = jiffies;
8214                         if (tg3_flag(tp, 5750_PLUS))
8215                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8216                 }
8217         }
8218
8219         return 0;
8220 }
8221
8222 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8223 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8224
8225 /* tp->lock is held. */
8226 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8227 {
8228         int err;
8229
8230         tg3_stop_fw(tp);
8231
8232         tg3_write_sig_pre_reset(tp, kind);
8233
8234         tg3_abort_hw(tp, silent);
8235         err = tg3_chip_reset(tp);
8236
8237         __tg3_set_mac_addr(tp, 0);
8238
8239         tg3_write_sig_legacy(tp, kind);
8240         tg3_write_sig_post_reset(tp, kind);
8241
8242         if (tp->hw_stats) {
8243                 /* Save the stats across chip resets... */
8244                 tg3_get_nstats(tp, &tp->net_stats_prev);
8245                 tg3_get_estats(tp, &tp->estats_prev);
8246
8247                 /* And make sure the next sample is new data */
8248                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8249         }
8250
8251         if (err)
8252                 return err;
8253
8254         return 0;
8255 }
8256
8257 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8258 {
8259         struct tg3 *tp = netdev_priv(dev);
8260         struct sockaddr *addr = p;
8261         int err = 0, skip_mac_1 = 0;
8262
8263         if (!is_valid_ether_addr(addr->sa_data))
8264                 return -EADDRNOTAVAIL;
8265
8266         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8267
8268         if (!netif_running(dev))
8269                 return 0;
8270
8271         if (tg3_flag(tp, ENABLE_ASF)) {
8272                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8273
8274                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8275                 addr0_low = tr32(MAC_ADDR_0_LOW);
8276                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8277                 addr1_low = tr32(MAC_ADDR_1_LOW);
8278
8279                 /* Skip MAC addr 1 if ASF is using it. */
8280                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8281                     !(addr1_high == 0 && addr1_low == 0))
8282                         skip_mac_1 = 1;
8283         }
8284         spin_lock_bh(&tp->lock);
8285         __tg3_set_mac_addr(tp, skip_mac_1);
8286         spin_unlock_bh(&tp->lock);
8287
8288         return err;
8289 }
8290
8291 /* tp->lock is held. */
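/* Write one BD info block into NIC SRAM.  Each block holds the 64-bit
 * host ring address (as high and low words), a combined maxlen/flags
 * word and, on pre-5705 chips, a NIC-local ring address.
 */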
8292 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8293                            dma_addr_t mapping, u32 maxlen_flags,
8294                            u32 nic_addr)
8295 {
8296         tg3_write_mem(tp,
8297                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8298                       ((u64) mapping >> 32));
8299         tg3_write_mem(tp,
8300                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8301                       ((u64) mapping & 0xffffffff));
8302         tg3_write_mem(tp,
8303                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8304                        maxlen_flags);
8305
8306         if (!tg3_flag(tp, 5705_PLUS))
8307                 tg3_write_mem(tp,
8308                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8309                               nic_addr);
8310 }
8311
8313 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8314 {
8315         int i = 0;
8316
8317         if (!tg3_flag(tp, ENABLE_TSS)) {
8318                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8319                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8320                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8321         } else {
8322                 tw32(HOSTCC_TXCOL_TICKS, 0);
8323                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8324                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8325
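                /* Per-vector coalescing registers are spaced 0x18
                 * bytes apart, starting at the VEC1 offsets.
                 */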
8326                 for (; i < tp->txq_cnt; i++) {
8327                         u32 reg;
8328
8329                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8330                         tw32(reg, ec->tx_coalesce_usecs);
8331                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8332                         tw32(reg, ec->tx_max_coalesced_frames);
8333                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8334                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8335                 }
8336         }
8337
8338         for (; i < tp->irq_max - 1; i++) {
8339                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8340                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8341                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8342         }
8343 }
8344
8345 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8346 {
8347         int i = 0;
8348         u32 limit = tp->rxq_cnt;
8349
8350         if (!tg3_flag(tp, ENABLE_RSS)) {
8351                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8352                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8353                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8354                 limit--;
8355         } else {
8356                 tw32(HOSTCC_RXCOL_TICKS, 0);
8357                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8358                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8359         }
8360
8361         for (; i < limit; i++) {
8362                 u32 reg;
8363
8364                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8365                 tw32(reg, ec->rx_coalesce_usecs);
8366                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8367                 tw32(reg, ec->rx_max_coalesced_frames);
8368                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8369                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8370         }
8371
8372         for (; i < tp->irq_max - 1; i++) {
8373                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8374                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8375                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8376         }
8377 }
8378
8379 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8380 {
8381         tg3_coal_tx_init(tp, ec);
8382         tg3_coal_rx_init(tp, ec);
8383
8384         if (!tg3_flag(tp, 5705_PLUS)) {
8385                 u32 val = ec->stats_block_coalesce_usecs;
8386
8387                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8388                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8389
8390                 if (!netif_carrier_ok(tp->dev))
8391                         val = 0;
8392
8393                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8394         }
8395 }
8396
8397 /* tp->lock is held. */
8398 static void tg3_rings_reset(struct tg3 *tp)
8399 {
8400         int i;
8401         u32 stblk, txrcb, rxrcb, limit;
8402         struct tg3_napi *tnapi = &tp->napi[0];
8403
8404         /* Disable all transmit rings but the first. */
8405         if (!tg3_flag(tp, 5705_PLUS))
8406                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8407         else if (tg3_flag(tp, 5717_PLUS))
8408                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8409         else if (tg3_flag(tp, 57765_CLASS))
8410                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8411         else
8412                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8413
8414         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8415              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8416                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8417                               BDINFO_FLAGS_DISABLED);
8418
8420         /* Disable all receive return rings but the first. */
8421         if (tg3_flag(tp, 5717_PLUS))
8422                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8423         else if (!tg3_flag(tp, 5705_PLUS))
8424                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8425         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8426                  tg3_flag(tp, 57765_CLASS))
8427                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8428         else
8429                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8430
8431         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8432              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8433                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8434                               BDINFO_FLAGS_DISABLED);
8435
8436         /* Disable interrupts */
8437         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8438         tp->napi[0].chk_msi_cnt = 0;
8439         tp->napi[0].last_rx_cons = 0;
8440         tp->napi[0].last_tx_cons = 0;
8441
8442         /* Zero mailbox registers. */
8443         if (tg3_flag(tp, SUPPORT_MSIX)) {
8444                 for (i = 1; i < tp->irq_max; i++) {
8445                         tp->napi[i].tx_prod = 0;
8446                         tp->napi[i].tx_cons = 0;
8447                         if (tg3_flag(tp, ENABLE_TSS))
8448                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8449                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8450                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8451                         tp->napi[i].chk_msi_cnt = 0;
8452                         tp->napi[i].last_rx_cons = 0;
8453                         tp->napi[i].last_tx_cons = 0;
8454                 }
8455                 if (!tg3_flag(tp, ENABLE_TSS))
8456                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8457         } else {
8458                 tp->napi[0].tx_prod = 0;
8459                 tp->napi[0].tx_cons = 0;
8460                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8461                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8462         }
8463
8464         /* Make sure the NIC-based send BD rings are disabled. */
8465         if (!tg3_flag(tp, 5705_PLUS)) {
8466                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8467                 for (i = 0; i < 16; i++)
8468                         tw32_tx_mbox(mbox + i * 8, 0);
8469         }
8470
8471         txrcb = NIC_SRAM_SEND_RCB;
8472         rxrcb = NIC_SRAM_RCV_RET_RCB;
8473
8474         /* Clear status block in ram. */
8475         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8476
8477         /* Set status block DMA address */
8478         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8479              ((u64) tnapi->status_mapping >> 32));
8480         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8481              ((u64) tnapi->status_mapping & 0xffffffff));
8482
8483         if (tnapi->tx_ring) {
8484                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8485                                (TG3_TX_RING_SIZE <<
8486                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8487                                NIC_SRAM_TX_BUFFER_DESC);
8488                 txrcb += TG3_BDINFO_SIZE;
8489         }
8490
8491         if (tnapi->rx_rcb) {
8492                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8493                                (tp->rx_ret_ring_mask + 1) <<
8494                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8495                 rxrcb += TG3_BDINFO_SIZE;
8496         }
8497
8498         stblk = HOSTCC_STATBLCK_RING1;
8499
8500         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8501                 u64 mapping = (u64)tnapi->status_mapping;
8502                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8503                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8504
8505                 /* Clear status block in ram. */
8506                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8507
8508                 if (tnapi->tx_ring) {
8509                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8510                                        (TG3_TX_RING_SIZE <<
8511                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8512                                        NIC_SRAM_TX_BUFFER_DESC);
8513                         txrcb += TG3_BDINFO_SIZE;
8514                 }
8515
8516                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8517                                ((tp->rx_ret_ring_mask + 1) <<
8518                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8519
8520                 stblk += 8;
8521                 rxrcb += TG3_BDINFO_SIZE;
8522         }
8523 }
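
/* For reference: per the BDINFO layout in tg3.h, each ring control
 * block in NIC SRAM is four 32-bit words -- host address high/low,
 * maxlen/flags and NIC address -- spaced TG3_BDINFO_SIZE apart.
 * Disabling a ring therefore reduces to a single SRAM write, a sketch
 * of what the loops above do for every ring but the first:
 *
 *	tg3_write_mem(tp, rcb + TG3_BDINFO_MAXLEN_FLAGS,
 *		      BDINFO_FLAGS_DISABLED);
 */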
8524
8525 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8526 {
8527         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8528
8529         if (!tg3_flag(tp, 5750_PLUS) ||
8530             tg3_flag(tp, 5780_CLASS) ||
8531             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8532             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8533             tg3_flag(tp, 57765_PLUS))
8534                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8535         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8536                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8537                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8538         else
8539                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8540
8541         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8542         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8543
8544         val = min(nic_rep_thresh, host_rep_thresh);
8545         tw32(RCVBDI_STD_THRESH, val);
8546
8547         if (tg3_flag(tp, 57765_PLUS))
8548                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8549
8550         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8551                 return;
8552
8553         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8554
8555         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8556
8557         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8558         tw32(RCVBDI_JUMBO_THRESH, val);
8559
8560         if (tg3_flag(tp, 57765_PLUS))
8561                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8562 }
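
/* Worked example of the standard-ring threshold math above, with an
 * illustrative ring depth (assume tp->rx_pending == 200):
 *
 *	host_rep_thresh = max(200 / 8, 1);		 = 25
 *	nic_rep_thresh  = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
 *	val = min(nic_rep_thresh, host_rep_thresh);
 *
 * i.e. replenishment triggers at the stricter of the NIC's BD cache
 * limit and one eighth of the host's posted ring depth.
 */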
8563
8564 static inline u32 calc_crc(unsigned char *buf, int len)
8565 {
8566         u32 reg;
8567         u32 tmp;
8568         int j, k;
8569
8570         reg = 0xffffffff;
8571
8572         for (j = 0; j < len; j++) {
8573                 reg ^= buf[j];
8574
8575                 for (k = 0; k < 8; k++) {
8576                         tmp = reg & 0x01;
8577
8578                         reg >>= 1;
8579
8580                         if (tmp)
8581                                 reg ^= 0xedb88320;
8582                 }
8583         }
8584
8585         return ~reg;
8586 }
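
/* calc_crc() is the bit-serial, bit-reflected CRC-32 used for the
 * Ethernet FCS (polynomial 0xedb88320).  Usage in the multicast
 * filter below:
 *
 *	crc = calc_crc(ha->addr, ETH_ALEN);
 *
 * It should match ~crc32_le(~0, buf, len) from lib/crc32 -- an
 * assumed equivalence based on the shared polynomial and seed, noted
 * here for reference only.
 */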
8587
8588 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8589 {
8590         /* accept or reject all multicast frames */
8591         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8592         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8593         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8594         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8595 }
8596
8597 static void __tg3_set_rx_mode(struct net_device *dev)
8598 {
8599         struct tg3 *tp = netdev_priv(dev);
8600         u32 rx_mode;
8601
8602         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8603                                   RX_MODE_KEEP_VLAN_TAG);
8604
8605 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8606         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8607          * flag clear.
8608          */
8609         if (!tg3_flag(tp, ENABLE_ASF))
8610                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8611 #endif
8612
8613         if (dev->flags & IFF_PROMISC) {
8614                 /* Promiscuous mode. */
8615                 rx_mode |= RX_MODE_PROMISC;
8616         } else if (dev->flags & IFF_ALLMULTI) {
8617                 /* Accept all multicast. */
8618                 tg3_set_multi(tp, 1);
8619         } else if (netdev_mc_empty(dev)) {
8620                 /* Reject all multicast. */
8621                 tg3_set_multi(tp, 0);
8622         } else {
8623                 /* Accept one or more multicast(s). */
8624                 struct netdev_hw_addr *ha;
8625                 u32 mc_filter[4] = { 0, };
8626                 u32 regidx;
8627                 u32 bit;
8628                 u32 crc;
8629
8630                 netdev_for_each_mc_addr(ha, dev) {
8631                         crc = calc_crc(ha->addr, ETH_ALEN);
8632                         bit = ~crc & 0x7f;
8633                         regidx = (bit & 0x60) >> 5;
8634                         bit &= 0x1f;
8635                         mc_filter[regidx] |= (1 << bit);
8636                 }
8637
8638                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8639                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8640                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8641                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8642         }
8643
8644         if (rx_mode != tp->rx_mode) {
8645                 tp->rx_mode = rx_mode;
8646                 tw32_f(MAC_RX_MODE, rx_mode);
8647                 udelay(10);
8648         }
8649 }
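
/* Worked example of the hash computed above: the low 7 bits of the
 * inverted CRC select one of 128 filter bits spread across the four
 * MAC_HASH_REG_* registers.  Suppose ~crc & 0x7f == 0x4a:
 *
 *	regidx = (0x4a & 0x60) >> 5;	 = 2  -> MAC_HASH_REG_2
 *	bit    = 0x4a & 0x1f;		 = 10 -> bit 10 of that register
 *	mc_filter[2] |= 1 << 10;
 */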
8650
8651 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8652 {
8653         int i;
8654
8655         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8656                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8657 }
8658
8659 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8660 {
8661         int i;
8662
8663         if (!tg3_flag(tp, SUPPORT_MSIX))
8664                 return;
8665
8666         if (tp->irq_cnt <= 2) {
8667                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8668                 return;
8669         }
8670
8671         /* Validate table against current IRQ count */
8672         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8673                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8674                         break;
8675         }
8676
8677         if (i != TG3_RSS_INDIR_TBL_SIZE)
8678                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8679 }
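
/* The indirection table holds rx-queue numbers, and the first vector
 * does not service an RSS return ring, so valid entries run
 * 0 .. (tp->irq_cnt - 2); any out-of-range entry above forces a
 * rebuild of the default round-robin table.  With irq_cnt <= 2 there
 * is at most one rx queue and the table is simply zeroed.
 */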
8680
8681 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8682 {
8683         int i = 0;
8684         u32 reg = MAC_RSS_INDIR_TBL_0;
8685
8686         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8687                 u32 val = tp->rss_ind_tbl[i];
8688                 i++;
8689                 for (; i % 8; i++) {
8690                         val <<= 4;
8691                         val |= tp->rss_ind_tbl[i];
8692                 }
8693                 tw32(reg, val);
8694                 reg += 4;
8695         }
8696 }
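
/* The loop above packs eight 4-bit table entries into each 32-bit
 * register, first entry in the most significant nibble.  An unrolled
 * sketch for a single register:
 *
 *	val = tbl[0];
 *	for (j = 1; j < 8; j++)
 *		val = (val << 4) | tbl[j];
 *	tw32(reg, val);
 *
 * so TG3_RSS_INDIR_TBL_SIZE entries occupy TG3_RSS_INDIR_TBL_SIZE / 8
 * consecutive registers starting at MAC_RSS_INDIR_TBL_0.
 */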
8697
8698 /* tp->lock is held. */
8699 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8700 {
8701         u32 val, rdmac_mode;
8702         int i, err, limit;
8703         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8704
8705         tg3_disable_ints(tp);
8706
8707         tg3_stop_fw(tp);
8708
8709         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8710
8711         if (tg3_flag(tp, INIT_COMPLETE))
8712                 tg3_abort_hw(tp, 1);
8713
8714         /* Enable MAC control of LPI */
8715         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8716                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8717                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8718                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8719
8720                 tw32_f(TG3_CPMU_EEE_CTRL,
8721                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8722
8723                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8724                       TG3_CPMU_EEEMD_LPI_IN_TX |
8725                       TG3_CPMU_EEEMD_LPI_IN_RX |
8726                       TG3_CPMU_EEEMD_EEE_ENABLE;
8727
8728                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8729                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8730
8731                 if (tg3_flag(tp, ENABLE_APE))
8732                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8733
8734                 tw32_f(TG3_CPMU_EEE_MODE, val);
8735
8736                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8737                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8738                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8739
8740                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8741                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8742                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8743         }
8744
8745         if (reset_phy)
8746                 tg3_phy_reset(tp);
8747
8748         err = tg3_chip_reset(tp);
8749         if (err)
8750                 return err;
8751
8752         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8753
8754         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8755                 val = tr32(TG3_CPMU_CTRL);
8756                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8757                 tw32(TG3_CPMU_CTRL, val);
8758
8759                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8760                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8761                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8762                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8763
8764                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8765                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8766                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8767                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8768
8769                 val = tr32(TG3_CPMU_HST_ACC);
8770                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8771                 val |= CPMU_HST_ACC_MACCLK_6_25;
8772                 tw32(TG3_CPMU_HST_ACC, val);
8773         }
8774
8775         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8776                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8777                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8778                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8779                 tw32(PCIE_PWR_MGMT_THRESH, val);
8780
8781                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8782                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8783
8784                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8785
8786                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8787                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8788         }
8789
8790         if (tg3_flag(tp, L1PLLPD_EN)) {
8791                 u32 grc_mode = tr32(GRC_MODE);
8792
8793                 /* Access the lower 1K of PL PCIE block registers. */
8794                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8795                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8796
8797                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8798                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8799                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8800
8801                 tw32(GRC_MODE, grc_mode);
8802         }
8803
8804         if (tg3_flag(tp, 57765_CLASS)) {
8805                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8806                         u32 grc_mode = tr32(GRC_MODE);
8807
8808                         /* Access the lower 1K of PL PCIE block registers. */
8809                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8810                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8811
8812                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8813                                    TG3_PCIE_PL_LO_PHYCTL5);
8814                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8815                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8816
8817                         tw32(GRC_MODE, grc_mode);
8818                 }
8819
8820                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8821                         u32 grc_mode = tr32(GRC_MODE);
8822
8823                         /* Access the lower 1K of DL PCIE block registers. */
8824                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8825                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8826
8827                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8828                                    TG3_PCIE_DL_LO_FTSMAX);
8829                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8830                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8831                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8832
8833                         tw32(GRC_MODE, grc_mode);
8834                 }
8835
8836                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8837                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8838                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8839                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8840         }
8841
8842         /* This works around an issue with Athlon chipsets on
8843          * B3 tigon3 silicon.  The bit has no effect on any other
8844          * revision, but it must not be set on PCI Express chips,
8845          * and the clocks must not be touched if a CPMU is present.
8846          */
8847         if (!tg3_flag(tp, CPMU_PRESENT)) {
8848                 if (!tg3_flag(tp, PCI_EXPRESS))
8849                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8850                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8851         }
8852
8853         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8854             tg3_flag(tp, PCIX_MODE)) {
8855                 val = tr32(TG3PCI_PCISTATE);
8856                 val |= PCISTATE_RETRY_SAME_DMA;
8857                 tw32(TG3PCI_PCISTATE, val);
8858         }
8859
8860         if (tg3_flag(tp, ENABLE_APE)) {
8861                 /* Allow reads and writes to the
8862                  * APE register and memory space.
8863                  */
8864                 val = tr32(TG3PCI_PCISTATE);
8865                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8866                        PCISTATE_ALLOW_APE_SHMEM_WR |
8867                        PCISTATE_ALLOW_APE_PSPACE_WR;
8868                 tw32(TG3PCI_PCISTATE, val);
8869         }
8870
8871         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8872                 /* Enable some hw fixes.  */
8873                 val = tr32(TG3PCI_MSI_DATA);
8874                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8875                 tw32(TG3PCI_MSI_DATA, val);
8876         }
8877
8878         /* Descriptor ring init may make accesses to the
8879          * NIC SRAM area to set up the TX descriptors, so we
8880          * can only do this after the hardware has been
8881          * successfully reset.
8882          */
8883         err = tg3_init_rings(tp);
8884         if (err)
8885                 return err;
8886
8887         if (tg3_flag(tp, 57765_PLUS)) {
8888                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8889                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8890                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8891                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8892                 if (!tg3_flag(tp, 57765_CLASS) &&
8893                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8894                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8895                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8896         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8897                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8898                 /* This value is determined during the probe-time DMA
8899                  * engine test, tg3_test_dma().
8900                  */
8901                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8902         }
8903
8904         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8905                           GRC_MODE_4X_NIC_SEND_RINGS |
8906                           GRC_MODE_NO_TX_PHDR_CSUM |
8907                           GRC_MODE_NO_RX_PHDR_CSUM);
8908         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8909
8910         /* Pseudo-header checksum is done by hardware logic and not
8911          * the offload processors, so make the chip do the pseudo-
8912          * header checksums on receive.  For transmit it is more
8913          * convenient to do the pseudo-header checksum in software
8914          * as Linux does that on transmit for us in all cases.
8915          */
8916         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8917
8918         tw32(GRC_MODE,
8919              tp->grc_mode |
8920              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8921
8922         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
8923         val = tr32(GRC_MISC_CFG);
8924         val &= ~0xff;
8925         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8926         tw32(GRC_MISC_CFG, val);
8927
8928         /* Initialize MBUF/DESC pool. */
8929         if (tg3_flag(tp, 5750_PLUS)) {
8930                 /* Do nothing.  */
8931         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8932                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8933                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8934                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8935                 else
8936                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8937                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8938                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8939         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8940                 int fw_len;
8941
8942                 fw_len = tp->fw_len;
8943                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8944                 tw32(BUFMGR_MB_POOL_ADDR,
8945                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8946                 tw32(BUFMGR_MB_POOL_SIZE,
8947                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8948         }
8949
8950         if (tp->dev->mtu <= ETH_DATA_LEN) {
8951                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8952                      tp->bufmgr_config.mbuf_read_dma_low_water);
8953                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8954                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8955                 tw32(BUFMGR_MB_HIGH_WATER,
8956                      tp->bufmgr_config.mbuf_high_water);
8957         } else {
8958                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8959                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8960                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8961                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8962                 tw32(BUFMGR_MB_HIGH_WATER,
8963                      tp->bufmgr_config.mbuf_high_water_jumbo);
8964         }
8965         tw32(BUFMGR_DMA_LOW_WATER,
8966              tp->bufmgr_config.dma_low_water);
8967         tw32(BUFMGR_DMA_HIGH_WATER,
8968              tp->bufmgr_config.dma_high_water);
8969
8970         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8971         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8972                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8973         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8974             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8975             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8976                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8977         tw32(BUFMGR_MODE, val);
8978         for (i = 0; i < 2000; i++) {
8979                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8980                         break;
8981                 udelay(10);
8982         }
8983         if (i >= 2000) {
8984                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8985                 return -ENODEV;
8986         }
8987
8988         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8989                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8990
8991         tg3_setup_rxbd_thresholds(tp);
8992
8993         /* Initialize the TG3_BDINFOs at:
8994          *  RCVDBDI_STD_BD:     standard eth size rx ring
8995          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8996          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8997          *
8998          * like so:
8999          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9000          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9001          *                              ring attribute flags
9002          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9003          *
9004          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9005          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9006          *
9007          * The size of each ring is fixed in the firmware, but the location is
9008          * configurable.
9009          */
9010         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9011              ((u64) tpr->rx_std_mapping >> 32));
9012         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9013              ((u64) tpr->rx_std_mapping & 0xffffffff));
9014         if (!tg3_flag(tp, 5717_PLUS))
9015                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9016                      NIC_SRAM_RX_BUFFER_DESC);
9017
9018         /* Disable the mini ring */
9019         if (!tg3_flag(tp, 5705_PLUS))
9020                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9021                      BDINFO_FLAGS_DISABLED);
9022
9023         /* Program the jumbo buffer descriptor ring control
9024          * blocks on those devices that have them.
9025          */
9026         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9027             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9028
9029                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9030                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9031                              ((u64) tpr->rx_jmb_mapping >> 32));
9032                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9033                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9034                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9035                               BDINFO_FLAGS_MAXLEN_SHIFT;
9036                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9037                              val | BDINFO_FLAGS_USE_EXT_RECV);
9038                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9039                             tg3_flag(tp, 57765_CLASS))
9040                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9041                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9042                 } else {
9043                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9044                              BDINFO_FLAGS_DISABLED);
9045                 }
9046
9047                 if (tg3_flag(tp, 57765_PLUS)) {
9048                         val = TG3_RX_STD_RING_SIZE(tp);
9049                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9050                         val |= (TG3_RX_STD_DMA_SZ << 2);
9051                 } else
9052                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9053         } else
9054                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9055
9056         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9057
9058         tpr->rx_std_prod_idx = tp->rx_pending;
9059         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9060
9061         tpr->rx_jmb_prod_idx =
9062                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9063         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9064
9065         tg3_rings_reset(tp);
9066
9067         /* Initialize MAC address and backoff seed. */
9068         __tg3_set_mac_addr(tp, 0);
9069
9070         /* MTU + ethernet header + FCS + optional VLAN tag */
9071         tw32(MAC_RX_MTU_SIZE,
9072              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9073
9074         /* The slot time is changed by tg3_setup_phy if we
9075          * run at gigabit with half duplex.
9076          */
9077         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9078               (6 << TX_LENGTHS_IPG_SHIFT) |
9079               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9080
9081         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9082                 val |= tr32(MAC_TX_LENGTHS) &
9083                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9084                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9085
9086         tw32(MAC_TX_LENGTHS, val);
9087
9088         /* Receive rules. */
9089         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9090         tw32(RCVLPC_CONFIG, 0x0181);
9091
9092         /* Calculate RDMAC_MODE setting early, we need it to determine
9093          * the RCVLPC_STATE_ENABLE mask.
9094          */
9095         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9096                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9097                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9098                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9099                       RDMAC_MODE_LNGREAD_ENAB);
9100
9101         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9102                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9103
9104         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9105             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9106             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9107                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9108                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9109                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9110
9111         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9112             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9113                 if (tg3_flag(tp, TSO_CAPABLE) &&
9114                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9115                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9116                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9117                            !tg3_flag(tp, IS_5788)) {
9118                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9119                 }
9120         }
9121
9122         if (tg3_flag(tp, PCI_EXPRESS))
9123                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9124
9125         if (tg3_flag(tp, HW_TSO_1) ||
9126             tg3_flag(tp, HW_TSO_2) ||
9127             tg3_flag(tp, HW_TSO_3))
9128                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9129
9130         if (tg3_flag(tp, 57765_PLUS) ||
9131             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9132             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9133                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9134
9135         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9136                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9137
9138         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9139             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9140             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9141             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9142             tg3_flag(tp, 57765_PLUS)) {
9143                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9144                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
9145                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9146                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9147                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9148                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9149                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9150                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9151                 }
9152                 tw32(TG3_RDMA_RSRVCTRL_REG,
9153                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9154         }
9155
9156         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9157             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9158                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9159                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9160                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9161                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9162         }
9163
9164         /* Receive/send statistics. */
9165         if (tg3_flag(tp, 5750_PLUS)) {
9166                 val = tr32(RCVLPC_STATS_ENABLE);
9167                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9168                 tw32(RCVLPC_STATS_ENABLE, val);
9169         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9170                    tg3_flag(tp, TSO_CAPABLE)) {
9171                 val = tr32(RCVLPC_STATS_ENABLE);
9172                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9173                 tw32(RCVLPC_STATS_ENABLE, val);
9174         } else {
9175                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9176         }
9177         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9178         tw32(SNDDATAI_STATSENAB, 0xffffff);
9179         tw32(SNDDATAI_STATSCTRL,
9180              (SNDDATAI_SCTRL_ENABLE |
9181               SNDDATAI_SCTRL_FASTUPD));
9182
9183         /* Set up the host coalescing engine. */
9184         tw32(HOSTCC_MODE, 0);
9185         for (i = 0; i < 2000; i++) {
9186                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9187                         break;
9188                 udelay(10);
9189         }
9190
9191         __tg3_set_coalesce(tp, &tp->coal);
9192
9193         if (!tg3_flag(tp, 5705_PLUS)) {
9194                 /* Status/statistics block address.  See tg3_timer,
9195                  * the tg3_periodic_fetch_stats call there, and
9196                  * tg3_get_stats to see how this works for 5705/5750 chips.
9197                  */
9198                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9199                      ((u64) tp->stats_mapping >> 32));
9200                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9201                      ((u64) tp->stats_mapping & 0xffffffff));
9202                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9203
9204                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9205
9206                 /* Clear statistics and status block memory areas */
9207                 for (i = NIC_SRAM_STATS_BLK;
9208                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9209                      i += sizeof(u32)) {
9210                         tg3_write_mem(tp, i, 0);
9211                         udelay(40);
9212                 }
9213         }
9214
9215         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9216
9217         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9218         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9219         if (!tg3_flag(tp, 5705_PLUS))
9220                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9221
9222         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9223                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9224                 /* reset to prevent losing 1st rx packet intermittently */
9225                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9226                 udelay(10);
9227         }
9228
9229         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9230                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9231                         MAC_MODE_FHDE_ENABLE;
9232         if (tg3_flag(tp, ENABLE_APE))
9233                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9234         if (!tg3_flag(tp, 5705_PLUS) &&
9235             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9236             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9237                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9238         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9239         udelay(40);
9240
9241         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9242          * If TG3_FLAG_IS_NIC is zero, we should read the
9243          * register to preserve the GPIO settings for LOMs. The GPIOs,
9244          * whether used as inputs or outputs, are set by boot code after
9245          * reset.
9246          */
9247         if (!tg3_flag(tp, IS_NIC)) {
9248                 u32 gpio_mask;
9249
9250                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9251                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9252                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9253
9254                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9255                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9256                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9257
9258                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9259                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9260
9261                 tp->grc_local_ctrl &= ~gpio_mask;
9262                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9263
9264                 /* GPIO1 must be driven high for eeprom write protect */
9265                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9266                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9267                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9268         }
9269         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9270         udelay(100);
9271
9272         if (tg3_flag(tp, USING_MSIX)) {
9273                 val = tr32(MSGINT_MODE);
9274                 val |= MSGINT_MODE_ENABLE;
9275                 if (tp->irq_cnt > 1)
9276                         val |= MSGINT_MODE_MULTIVEC_EN;
9277                 if (!tg3_flag(tp, 1SHOT_MSI))
9278                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9279                 tw32(MSGINT_MODE, val);
9280         }
9281
9282         if (!tg3_flag(tp, 5705_PLUS)) {
9283                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9284                 udelay(40);
9285         }
9286
9287         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9288                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9289                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9290                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9291                WDMAC_MODE_LNGREAD_ENAB);
9292
9293         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9294             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9295                 if (tg3_flag(tp, TSO_CAPABLE) &&
9296                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9297                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9298                         /* nothing */
9299                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9300                            !tg3_flag(tp, IS_5788)) {
9301                         val |= WDMAC_MODE_RX_ACCEL;
9302                 }
9303         }
9304
9305         /* Enable host coalescing bug fix */
9306         if (tg3_flag(tp, 5755_PLUS))
9307                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9308
9309         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9310                 val |= WDMAC_MODE_BURST_ALL_DATA;
9311
9312         tw32_f(WDMAC_MODE, val);
9313         udelay(40);
9314
9315         if (tg3_flag(tp, PCIX_MODE)) {
9316                 u16 pcix_cmd;
9317
9318                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9319                                      &pcix_cmd);
9320                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9321                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9322                         pcix_cmd |= PCI_X_CMD_READ_2K;
9323                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9324                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9325                         pcix_cmd |= PCI_X_CMD_READ_2K;
9326                 }
9327                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9328                                       pcix_cmd);
9329         }
9330
9331         tw32_f(RDMAC_MODE, rdmac_mode);
9332         udelay(40);
9333
9334         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9335                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9336                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9337                                 break;
9338                 }
9339                 if (i < TG3_NUM_RDMA_CHANNELS) {
9340                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9341                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9342                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9343                         tg3_flag_set(tp, 5719_RDMA_BUG);
9344                 }
9345         }
9346
9347         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9348         if (!tg3_flag(tp, 5705_PLUS))
9349                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9350
9351         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9352                 tw32(SNDDATAC_MODE,
9353                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9354         else
9355                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9356
9357         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9358         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9359         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9360         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9361                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9362         tw32(RCVDBDI_MODE, val);
9363         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9364         if (tg3_flag(tp, HW_TSO_1) ||
9365             tg3_flag(tp, HW_TSO_2) ||
9366             tg3_flag(tp, HW_TSO_3))
9367                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9368         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9369         if (tg3_flag(tp, ENABLE_TSS))
9370                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9371         tw32(SNDBDI_MODE, val);
9372         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9373
9374         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9375                 err = tg3_load_5701_a0_firmware_fix(tp);
9376                 if (err)
9377                         return err;
9378         }
9379
9380         if (tg3_flag(tp, TSO_CAPABLE)) {
9381                 err = tg3_load_tso_firmware(tp);
9382                 if (err)
9383                         return err;
9384         }
9385
9386         tp->tx_mode = TX_MODE_ENABLE;
9387
9388         if (tg3_flag(tp, 5755_PLUS) ||
9389             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9390                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9391
9392         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9393                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9394                 tp->tx_mode &= ~val;
9395                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9396         }
9397
9398         tw32_f(MAC_TX_MODE, tp->tx_mode);
9399         udelay(100);
9400
9401         if (tg3_flag(tp, ENABLE_RSS)) {
9402                 tg3_rss_write_indir_tbl(tp);
9403
9404                 /* Set up the "secret" hash key. */
9405                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9406                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9407                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9408                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9409                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9410                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9411                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9412                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9413                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9414                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9415         }
9416
9417         tp->rx_mode = RX_MODE_ENABLE;
9418         if (tg3_flag(tp, 5755_PLUS))
9419                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9420
9421         if (tg3_flag(tp, ENABLE_RSS))
9422                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9423                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9424                                RX_MODE_RSS_IPV6_HASH_EN |
9425                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9426                                RX_MODE_RSS_IPV4_HASH_EN |
9427                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9428
9429         tw32_f(MAC_RX_MODE, tp->rx_mode);
9430         udelay(10);
9431
9432         tw32(MAC_LED_CTRL, tp->led_ctrl);
9433
9434         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9435         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9436                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9437                 udelay(10);
9438         }
9439         tw32_f(MAC_RX_MODE, tp->rx_mode);
9440         udelay(10);
9441
9442         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9443                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9444                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9445                         /* Set drive transmission level to 1.2V, but
9446                          * only if the signal pre-emphasis bit is not set.  */
9447                         val = tr32(MAC_SERDES_CFG);
9448                         val &= 0xfffff000;
9449                         val |= 0x880;
9450                         tw32(MAC_SERDES_CFG, val);
9451                 }
9452                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9453                         tw32(MAC_SERDES_CFG, 0x616000);
9454         }
9455
9456         /* Prevent chip from dropping frames when flow control
9457          * is enabled.
9458          */
9459         if (tg3_flag(tp, 57765_CLASS))
9460                 val = 1;
9461         else
9462                 val = 2;
9463         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9464
9465         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9466             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9467                 /* Use hardware link auto-negotiation */
9468                 tg3_flag_set(tp, HW_AUTONEG);
9469         }
9470
9471         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9472             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9473                 u32 tmp;
9474
9475                 tmp = tr32(SERDES_RX_CTRL);
9476                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9477                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9478                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9479                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9480         }
9481
9482         if (!tg3_flag(tp, USE_PHYLIB)) {
9483                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9484                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9485
9486                 err = tg3_setup_phy(tp, 0);
9487                 if (err)
9488                         return err;
9489
9490                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9491                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9492                         u32 tmp;
9493
9494                         /* Clear CRC stats. */
9495                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9496                                 tg3_writephy(tp, MII_TG3_TEST1,
9497                                              tmp | MII_TG3_TEST1_CRC_EN);
9498                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9499                         }
9500                 }
9501         }
9502
9503         __tg3_set_rx_mode(tp->dev);
9504
9505         /* Initialize receive rules. */
9506         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9507         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9508         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9509         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9510
9511         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9512                 limit = 8;
9513         else
9514                 limit = 16;
9515         if (tg3_flag(tp, ENABLE_ASF))
9516                 limit -= 4;
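        /* The switch below falls through deliberately: entering at
         * case 'limit' clears every receive rule from (limit - 1)
         * down to rule 4.  Rules 3 and 2 are intentionally left
         * untouched (note the commented-out writes).
         */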
9517         switch (limit) {
9518         case 16:
9519                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9520         case 15:
9521                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9522         case 14:
9523                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9524         case 13:
9525                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9526         case 12:
9527                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9528         case 11:
9529                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9530         case 10:
9531                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9532         case 9:
9533                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9534         case 8:
9535                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9536         case 7:
9537                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9538         case 6:
9539                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9540         case 5:
9541                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9542         case 4:
9543                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9544         case 3:
9545                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9546         case 2:
9547         case 1:
9548
9549         default:
9550                 break;
9551         }
9552
9553         if (tg3_flag(tp, ENABLE_APE))
9554                 /* Write our heartbeat update interval to APE. */
9555                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9556                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9557
9558         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9559
9560         return 0;
9561 }
9562
9563 /* Called at device open time to get the chip ready for
9564  * packet processing.  Invoked with tp->lock held.
9565  */
9566 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9567 {
9568         tg3_switch_clocks(tp);
9569
9570         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9571
9572         return tg3_reset_hw(tp, reset_phy);
9573 }
9574
9575 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9576 {
9577         int i;
9578
9579         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9580                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9581
9582                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9583                 off += len;
9584
9585                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9586                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9587                         memset(ocir, 0, TG3_OCIR_LEN);
9588         }
9589 }
9590
9591 /* sysfs attributes for hwmon */
9592 static ssize_t tg3_show_temp(struct device *dev,
9593                              struct device_attribute *devattr, char *buf)
9594 {
9595         struct pci_dev *pdev = to_pci_dev(dev);
9596         struct net_device *netdev = pci_get_drvdata(pdev);
9597         struct tg3 *tp = netdev_priv(netdev);
9598         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9599         u32 temperature;
9600
9601         spin_lock_bh(&tp->lock);
9602         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9603                                 sizeof(temperature));
9604         spin_unlock_bh(&tp->lock);
9605         return sprintf(buf, "%u\n", temperature * 1000);
9606 }
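
/* Note: the hwmon sysfs ABI (Documentation/hwmon/sysfs-interface)
 * expresses temp*_input and its limits in millidegrees Celsius; the
 * scaling by 1000 above assumes the APE scratchpad reports whole
 * degrees.
 */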
9607
9609 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9610                           TG3_TEMP_SENSOR_OFFSET);
9611 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9612                           TG3_TEMP_CAUTION_OFFSET);
9613 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9614                           TG3_TEMP_MAX_OFFSET);
9615
9616 static struct attribute *tg3_attributes[] = {
9617         &sensor_dev_attr_temp1_input.dev_attr.attr,
9618         &sensor_dev_attr_temp1_crit.dev_attr.attr,
9619         &sensor_dev_attr_temp1_max.dev_attr.attr,
9620         NULL
9621 };
9622
9623 static const struct attribute_group tg3_group = {
9624         .attrs = tg3_attributes,
9625 };
9626
9627 static void tg3_hwmon_close(struct tg3 *tp)
9628 {
9629         if (tp->hwmon_dev) {
9630                 hwmon_device_unregister(tp->hwmon_dev);
9631                 tp->hwmon_dev = NULL;
9632                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9633         }
9634 }
9635
9636 static void tg3_hwmon_open(struct tg3 *tp)
9637 {
9638         int i, err;
9639         u32 size = 0;
9640         struct pci_dev *pdev = tp->pdev;
9641         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9642
9643         tg3_sd_scan_scratchpad(tp, ocirs);
9644
9645         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9646                 if (!ocirs[i].src_data_length)
9647                         continue;
9648
9649                 size += ocirs[i].src_hdr_length;
9650                 size += ocirs[i].src_data_length;
9651         }
9652
9653         if (!size)
9654                 return;
9655
9656         /* Register hwmon sysfs hooks */
9657         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9658         if (err) {
9659                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9660                 return;
9661         }
9662
9663         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9664         if (IS_ERR(tp->hwmon_dev)) {
9665                 tp->hwmon_dev = NULL;
9666                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9667                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9668         }
9669 }
9670
9672 #define TG3_STAT_ADD32(PSTAT, REG) \
9673 do {    u32 __val = tr32(REG); \
9674         (PSTAT)->low += __val; \
9675         if ((PSTAT)->low < __val) \
9676                 (PSTAT)->high += 1; \
9677 } while (0)
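
/* TG3_STAT_ADD32() folds a 32-bit hardware statistics register into a
 * 64-bit high/low software counter, detecting low-word wraparound with
 * a single unsigned compare.  Worked example:
 *
 *	low = 0xffffff00, __val = 0x200
 *	low += __val;			 low is now 0x100 (wrapped)
 *	low (0x100) < __val (0x200)	 -> carry, so high += 1
 */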
9678
9679 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9680 {
9681         struct tg3_hw_stats *sp = tp->hw_stats;
9682
9683         if (!netif_carrier_ok(tp->dev))
9684                 return;
9685
9686         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9687         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9688         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9689         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9690         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9691         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9692         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9693         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9694         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9695         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9696         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9697         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9698         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9699         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
9700                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9701                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9702                 u32 val;
9703
9704                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9705                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
9706                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9707                 tg3_flag_clear(tp, 5719_RDMA_BUG);
9708         }
9709
9710         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9711         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9712         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9713         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9714         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9715         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9716         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9717         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9718         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9719         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9720         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9721         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9722         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9723         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9724
9725         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9726         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9727             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9728             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9729                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9730         } else {
9731                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9732                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9733                 if (val) {
9734                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9735                         sp->rx_discards.low += val;
9736                         if (sp->rx_discards.low < val)
9737                                 sp->rx_discards.high += 1;
9738                 }
9739                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9740         }
9741         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9742 }
9743
9744 static void tg3_chk_missed_msi(struct tg3 *tp)
9745 {
9746         u32 i;
9747
9748         for (i = 0; i < tp->irq_cnt; i++) {
9749                 struct tg3_napi *tnapi = &tp->napi[i];
9750
9751                 if (tg3_has_work(tnapi)) {
9752                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9753                             tnapi->last_tx_cons == tnapi->tx_cons) {
9754                                 if (tnapi->chk_msi_cnt < 1) {
9755                                         tnapi->chk_msi_cnt++;
9756                                         return;
9757                                 }
9758                                 tg3_msi(0, tnapi);
9759                         }
9760                 }
9761                 tnapi->chk_msi_cnt = 0;
9762                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9763                 tnapi->last_tx_cons = tnapi->tx_cons;
9764         }
9765 }
9766
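/* Periodic driver timer, re-armed every tp->timer_offset jiffies.  It
 * does nothing while an irq sync or reset task is in flight.  Each
 * tick it checks for missed MSIs where needed, nudges the race-prone
 * non-tagged status protocol and schedules a reset if the write DMA
 * engine has stopped.  Link polling and statistics run once per
 * second; the ASF heartbeat runs on its own slower schedule.
 */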
9767 static void tg3_timer(unsigned long __opaque)
9768 {
9769         struct tg3 *tp = (struct tg3 *) __opaque;
9770
9771         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9772                 goto restart_timer;
9773
9774         spin_lock(&tp->lock);
9775
9776         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9777             tg3_flag(tp, 57765_CLASS))
9778                 tg3_chk_missed_msi(tp);
9779
9780         if (!tg3_flag(tp, TAGGED_STATUS)) {
9781         /* All of this garbage is necessary because, when using
9782          * non-tagged IRQ status, the mailbox/status_block protocol
9783          * the chip uses with the CPU is race-prone.
9784          */
9785                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9786                         tw32(GRC_LOCAL_CTRL,
9787                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9788                 } else {
9789                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9790                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9791                 }
9792
9793                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9794                         spin_unlock(&tp->lock);
9795                         tg3_reset_task_schedule(tp);
9796                         goto restart_timer;
9797                 }
9798         }
9799
9800         /* This part only runs once per second. */
9801         if (!--tp->timer_counter) {
9802                 if (tg3_flag(tp, 5705_PLUS))
9803                         tg3_periodic_fetch_stats(tp);
9804
9805                 if (tp->setlpicnt && !--tp->setlpicnt)
9806                         tg3_phy_eee_enable(tp);
9807
9808                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9809                         u32 mac_stat;
9810                         int phy_event;
9811
9812                         mac_stat = tr32(MAC_STATUS);
9813
9814                         phy_event = 0;
9815                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9816                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9817                                         phy_event = 1;
9818                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9819                                 phy_event = 1;
9820
9821                         if (phy_event)
9822                                 tg3_setup_phy(tp, 0);
9823                 } else if (tg3_flag(tp, POLL_SERDES)) {
9824                         u32 mac_stat = tr32(MAC_STATUS);
9825                         int need_setup = 0;
9826
9827                         if (netif_carrier_ok(tp->dev) &&
9828                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9829                                 need_setup = 1;
9830                         }
9831                         if (!netif_carrier_ok(tp->dev) &&
9832                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9833                                          MAC_STATUS_SIGNAL_DET))) {
9834                                 need_setup = 1;
9835                         }
9836                         if (need_setup) {
9837                                 if (!tp->serdes_counter) {
9838                                         tw32_f(MAC_MODE,
9839                                              (tp->mac_mode &
9840                                               ~MAC_MODE_PORT_MODE_MASK));
9841                                         udelay(40);
9842                                         tw32_f(MAC_MODE, tp->mac_mode);
9843                                         udelay(40);
9844                                 }
9845                                 tg3_setup_phy(tp, 0);
9846                         }
9847                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9848                            tg3_flag(tp, 5780_CLASS)) {
9849                         tg3_serdes_parallel_detect(tp);
9850                 }
9851
9852                 tp->timer_counter = tp->timer_multiplier;
9853         }
9854
9855         /* Heartbeat is only sent once every 2 seconds.
9856          *
9857          * The heartbeat is to tell the ASF firmware that the host
9858          * driver is still alive.  In the event that the OS crashes,
9859          * ASF needs to reset the hardware to free up the FIFO space
9860          * that may be filled with rx packets destined for the host.
9861          * If the FIFO is full, ASF will no longer function properly.
9862          *
9863          * Unintended resets have been reported on real time kernels
9864          * where the timer doesn't run on time.  Netpoll will also have
9865          * the same problem.
9866          *
9867          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9868          * to check the ring condition when the heartbeat is expiring
9869          * before doing the reset.  This will prevent most unintended
9870          * resets.
9871          */
9872         if (!--tp->asf_counter) {
9873                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9874                         tg3_wait_for_event_ack(tp);
9875
9876                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9877                                       FWCMD_NICDRV_ALIVE3);
9878                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9879                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9880                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9881
9882                         tg3_generate_fw_event(tp);
9883                 }
9884                 tp->asf_counter = tp->asf_multiplier;
9885         }
9886
9887         spin_unlock(&tp->lock);
9888
9889 restart_timer:
9890         tp->timer.expires = jiffies + tp->timer_offset;
9891         add_timer(&tp->timer);
9892 }
9893
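/* Pick the timer granularity: one tick per second when tagged status
 * makes the fast poll unnecessary, ten ticks per second otherwise.  A
 * worked example, assuming TG3_FW_UPDATE_FREQ_SEC is 2 (consistent
 * with the "every 2 seconds" heartbeat comment above): with tagged
 * status, timer_offset = HZ, timer_multiplier = 1 and asf_multiplier
 * = 2, so the once-per-second work runs on every tick and the
 * heartbeat on every second one.
 */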
9894 static void __devinit tg3_timer_init(struct tg3 *tp)
9895 {
9896         if (tg3_flag(tp, TAGGED_STATUS) &&
9897             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9898             !tg3_flag(tp, 57765_CLASS))
9899                 tp->timer_offset = HZ;
9900         else
9901                 tp->timer_offset = HZ / 10;
9902
9903         BUG_ON(tp->timer_offset > HZ);
9904
9905         tp->timer_multiplier = (HZ / tp->timer_offset);
9906         tp->asf_multiplier = (HZ / tp->timer_offset) *
9907                              TG3_FW_UPDATE_FREQ_SEC;
9908
9909         init_timer(&tp->timer);
9910         tp->timer.data = (unsigned long) tp;
9911         tp->timer.function = tg3_timer;
9912 }
9913
9914 static void tg3_timer_start(struct tg3 *tp)
9915 {
9916         tp->asf_counter   = tp->asf_multiplier;
9917         tp->timer_counter = tp->timer_multiplier;
9918
9919         tp->timer.expires = jiffies + tp->timer_offset;
9920         add_timer(&tp->timer);
9921 }
9922
9923 static void tg3_timer_stop(struct tg3 *tp)
9924 {
9925         del_timer_sync(&tp->timer);
9926 }
9927
9928 /* Restart hardware after configuration changes, self-test, etc.
9929  * Invoked with tp->lock held.
9930  */
9931 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9932         __releases(tp->lock)
9933         __acquires(tp->lock)
9934 {
9935         int err;
9936
9937         err = tg3_init_hw(tp, reset_phy);
9938         if (err) {
9939                 netdev_err(tp->dev,
9940                            "Failed to re-initialize device, aborting\n");
9941                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9942                 tg3_full_unlock(tp);
9943                 tg3_timer_stop(tp);
9944                 tp->irq_sync = 0;
9945                 tg3_napi_enable(tp);
9946                 dev_close(tp->dev);
9947                 tg3_full_lock(tp, 0);
9948         }
9949         return err;
9950 }
9951
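/* Deferred reset worker, scheduled via tg3_reset_task_schedule().
 * Quiesces the PHY and the network interface, applies the mailbox
 * write-ordering workaround if a TX recovery is pending, then halts
 * and re-initializes the hardware before restarting everything.
 */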
9952 static void tg3_reset_task(struct work_struct *work)
9953 {
9954         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9955         int err;
9956
9957         tg3_full_lock(tp, 0);
9958
9959         if (!netif_running(tp->dev)) {
9960                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9961                 tg3_full_unlock(tp);
9962                 return;
9963         }
9964
9965         tg3_full_unlock(tp);
9966
9967         tg3_phy_stop(tp);
9968
9969         tg3_netif_stop(tp);
9970
9971         tg3_full_lock(tp, 1);
9972
9973         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9974                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9975                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9976                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9977                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9978         }
9979
9980         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9981         err = tg3_init_hw(tp, 1);
9982         if (err)
9983                 goto out;
9984
9985         tg3_netif_start(tp);
9986
9987 out:
9988         tg3_full_unlock(tp);
9989
9990         if (!err)
9991                 tg3_phy_start(tp);
9992
9993         tg3_flag_clear(tp, RESET_TASK_PENDING);
9994 }
9995
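/* Request the interrupt for one NAPI context.  MSI/MSI-X vectors get a
 * dedicated handler (the one-shot variant where available); legacy
 * INTx is requested IRQF_SHARED, using the tagged-status handler when
 * the chip supports it.  Multi-vector setups label each IRQ
 * "<ifname>-<vector>".
 */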
9996 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9997 {
9998         irq_handler_t fn;
9999         unsigned long flags;
10000         char *name;
10001         struct tg3_napi *tnapi = &tp->napi[irq_num];
10002
10003         if (tp->irq_cnt == 1)
10004                 name = tp->dev->name;
10005         else {
10006                 name = &tnapi->irq_lbl[0];
10007                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10008                 name[IFNAMSIZ-1] = 0;
10009         }
10010
10011         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10012                 fn = tg3_msi;
10013                 if (tg3_flag(tp, 1SHOT_MSI))
10014                         fn = tg3_msi_1shot;
10015                 flags = 0;
10016         } else {
10017                 fn = tg3_interrupt;
10018                 if (tg3_flag(tp, TAGGED_STATUS))
10019                         fn = tg3_interrupt_tagged;
10020                 flags = IRQF_SHARED;
10021         }
10022
10023         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10024 }
10025
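/* Verify that the device can actually deliver an interrupt: install
 * tg3_test_isr, disable MSI one-shot mode so the result is observable,
 * force an interrupt via HOSTCC_MODE_NOW, and poll the interrupt
 * mailbox for up to ~50 ms before restoring the normal handler.
 */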
10026 static int tg3_test_interrupt(struct tg3 *tp)
10027 {
10028         struct tg3_napi *tnapi = &tp->napi[0];
10029         struct net_device *dev = tp->dev;
10030         int err, i, intr_ok = 0;
10031         u32 val;
10032
10033         if (!netif_running(dev))
10034                 return -ENODEV;
10035
10036         tg3_disable_ints(tp);
10037
10038         free_irq(tnapi->irq_vec, tnapi);
10039
10040         /*
10041          * Turn off MSI one-shot mode.  Otherwise this test has no
10042          * observable way to tell whether the interrupt was delivered.
10043          */
10044         if (tg3_flag(tp, 57765_PLUS)) {
10045                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10046                 tw32(MSGINT_MODE, val);
10047         }
10048
10049         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10050                           IRQF_SHARED, dev->name, tnapi);
10051         if (err)
10052                 return err;
10053
10054         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10055         tg3_enable_ints(tp);
10056
10057         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10058                tnapi->coal_now);
10059
10060         for (i = 0; i < 5; i++) {
10061                 u32 int_mbox, misc_host_ctrl;
10062
10063                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10064                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10065
10066                 if ((int_mbox != 0) ||
10067                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10068                         intr_ok = 1;
10069                         break;
10070                 }
10071
10072                 if (tg3_flag(tp, 57765_PLUS) &&
10073                     tnapi->hw_status->status_tag != tnapi->last_tag)
10074                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10075
10076                 msleep(10);
10077         }
10078
10079         tg3_disable_ints(tp);
10080
10081         free_irq(tnapi->irq_vec, tnapi);
10082
10083         err = tg3_request_irq(tp, 0);
10084
10085         if (err)
10086                 return err;
10087
10088         if (intr_ok) {
10089                 /* Reenable MSI one shot mode. */
10090                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10091                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10092                         tw32(MSGINT_MODE, val);
10093                 }
10094                 return 0;
10095         }
10096
10097         return -EIO;
10098 }
10099
10100 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
10101  * is successfully restored.
10102  */
10103 static int tg3_test_msi(struct tg3 *tp)
10104 {
10105         int err;
10106         u16 pci_cmd;
10107
10108         if (!tg3_flag(tp, USING_MSI))
10109                 return 0;
10110
10111         /* Turn off SERR reporting in case MSI terminates with Master
10112          * Abort.
10113          */
10114         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10115         pci_write_config_word(tp->pdev, PCI_COMMAND,
10116                               pci_cmd & ~PCI_COMMAND_SERR);
10117
10118         err = tg3_test_interrupt(tp);
10119
10120         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10121
10122         if (!err)
10123                 return 0;
10124
10125         /* other failures */
10126         if (err != -EIO)
10127                 return err;
10128
10129         /* MSI test failed, go back to INTx mode */
10130         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10131                     "to INTx mode. Please report this failure to the PCI "
10132                     "maintainer and include system chipset information\n");
10133
10134         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10135
10136         pci_disable_msi(tp->pdev);
10137
10138         tg3_flag_clear(tp, USING_MSI);
10139         tp->napi[0].irq_vec = tp->pdev->irq;
10140
10141         err = tg3_request_irq(tp, 0);
10142         if (err)
10143                 return err;
10144
10145         /* Need to reset the chip because the MSI cycle may have terminated
10146          * with Master Abort.
10147          */
10148         tg3_full_lock(tp, 1);
10149
10150         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10151         err = tg3_init_hw(tp, 1);
10152
10153         tg3_full_unlock(tp);
10154
10155         if (err)
10156                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10157
10158         return err;
10159 }
10160
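/* Fetch the firmware blob named in tp->fw_needed and sanity-check the
 * length field in its 12-byte header (version, start address, length)
 * against the size of the file actually loaded.
 */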
10161 static int tg3_request_firmware(struct tg3 *tp)
10162 {
10163         const __be32 *fw_data;
10164
10165         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10166                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10167                            tp->fw_needed);
10168                 return -ENOENT;
10169         }
10170
10171         fw_data = (void *)tp->fw->data;
10172
10173         /* Firmware blob starts with version numbers, followed by
10174          * start address and _full_ length including BSS sections
10175          * (which must be longer than the actual data, of course).
10176          */
10177
10178         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10179         if (tp->fw_len < (tp->fw->size - 12)) {
10180                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10181                            tp->fw_len, tp->fw_needed);
10182                 release_firmware(tp->fw);
10183                 tp->fw = NULL;
10184                 return -EINVAL;
10185         }
10186
10187         /* We no longer need firmware; we have it. */
10188         tp->fw_needed = NULL;
10189         return 0;
10190 }
10191
10192 static u32 tg3_irq_count(struct tg3 *tp)
10193 {
10194         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10195
10196         if (irq_cnt > 1) {
10197                 /* We want as many rx rings enabled as there are cpus.
10198                  * In multiqueue MSI-X mode, the first MSI-X vector
10199                  * only deals with link interrupts, etc, so we add
10200                  * one to the number of vectors we are requesting.
10201                  */
10202                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10203         }
10204
10205         return irq_cnt;
10206 }
10207
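/* Try to switch the device to MSI-X.  Returns false if no vectors can
 * be allocated; when the system grants fewer vectors than requested,
 * the allocation is retried with the granted count and the RX/TX queue
 * counts are scaled down to match.  RSS/TSS are only enabled once more
 * than one vector is confirmed.
 */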
10208 static bool tg3_enable_msix(struct tg3 *tp)
10209 {
10210         int i, rc;
10211         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10212
10213         tp->txq_cnt = tp->txq_req;
10214         tp->rxq_cnt = tp->rxq_req;
10215         if (!tp->rxq_cnt)
10216                 tp->rxq_cnt = netif_get_num_default_rss_queues();
10217         if (tp->rxq_cnt > tp->rxq_max)
10218                 tp->rxq_cnt = tp->rxq_max;
10219
10220         /* Disable multiple TX rings by default.  Simple round-robin hardware
10221          * scheduling of the TX rings can cause starvation of rings with
10222          * small packets when other rings have TSO or jumbo packets.
10223          */
10224         if (!tp->txq_req)
10225                 tp->txq_cnt = 1;
10226
10227         tp->irq_cnt = tg3_irq_count(tp);
10228
10229         for (i = 0; i < tp->irq_max; i++) {
10230                 msix_ent[i].entry  = i;
10231                 msix_ent[i].vector = 0;
10232         }
10233
10234         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10235         if (rc < 0) {
10236                 return false;
10237         } else if (rc != 0) {
10238                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10239                         return false;
10240                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10241                               tp->irq_cnt, rc);
10242                 tp->irq_cnt = rc;
10243                 tp->rxq_cnt = max(rc - 1, 1);
10244                 if (tp->txq_cnt)
10245                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10246         }
10247
10248         for (i = 0; i < tp->irq_max; i++)
10249                 tp->napi[i].irq_vec = msix_ent[i].vector;
10250
10251         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10252                 pci_disable_msix(tp->pdev);
10253                 return false;
10254         }
10255
10256         if (tp->irq_cnt == 1)
10257                 return true;
10258
10259         tg3_flag_set(tp, ENABLE_RSS);
10260
10261         if (tp->txq_cnt > 1)
10262                 tg3_flag_set(tp, ENABLE_TSS);
10263
10264         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10265
10266         return true;
10267 }
10268
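/* Choose the interrupt mode at open time: MSI-X if it can be enabled,
 * else MSI, else legacy INTx, programming MSGINT_MODE to match.  A
 * single-vector configuration is forced down to one RX and one TX
 * queue.
 */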
10269 static void tg3_ints_init(struct tg3 *tp)
10270 {
10271         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10272             !tg3_flag(tp, TAGGED_STATUS)) {
10273                 /* All MSI-supporting chips should support tagged
10274                  * status; if not, warn and fall back to INTx.
10275                  */
10276                 netdev_warn(tp->dev,
10277                             "MSI without TAGGED_STATUS? Not using MSI\n");
10278                 goto defcfg;
10279         }
10280
10281         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10282                 tg3_flag_set(tp, USING_MSIX);
10283         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10284                 tg3_flag_set(tp, USING_MSI);
10285
10286         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10287                 u32 msi_mode = tr32(MSGINT_MODE);
10288                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10289                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10290                 if (!tg3_flag(tp, 1SHOT_MSI))
10291                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10292                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10293         }
10294 defcfg:
10295         if (!tg3_flag(tp, USING_MSIX)) {
10296                 tp->irq_cnt = 1;
10297                 tp->napi[0].irq_vec = tp->pdev->irq;
10298         }
10299
10300         if (tp->irq_cnt == 1) {
10301                 tp->txq_cnt = 1;
10302                 tp->rxq_cnt = 1;
10303                 netif_set_real_num_tx_queues(tp->dev, 1);
10304                 netif_set_real_num_rx_queues(tp->dev, 1);
10305         }
10306 }
10307
10308 static void tg3_ints_fini(struct tg3 *tp)
10309 {
10310         if (tg3_flag(tp, USING_MSIX))
10311                 pci_disable_msix(tp->pdev);
10312         else if (tg3_flag(tp, USING_MSI))
10313                 pci_disable_msi(tp->pdev);
10314         tg3_flag_clear(tp, USING_MSI);
10315         tg3_flag_clear(tp, USING_MSIX);
10316         tg3_flag_clear(tp, ENABLE_RSS);
10317         tg3_flag_clear(tp, ENABLE_TSS);
10318 }
10319
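/* Bring the device up.  Interrupts are configured first because the
 * result decides how many NAPI contexts and rings to allocate; then
 * descriptor memory, NAPI, the per-vector IRQs and the hardware itself
 * are set up, MSI delivery is optionally verified, and finally the
 * timer and TX queues are started.  Error paths unwind in reverse.
 */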
10320 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
10321 {
10322         struct net_device *dev = tp->dev;
10323         int i, err;
10324
10325         /*
10326          * Setup interrupts first so we know how
10327          * many NAPI resources to allocate
10328          */
10329         tg3_ints_init(tp);
10330
10331         tg3_rss_check_indir_tbl(tp);
10332
10333         /* The placement of this call is tied
10334          * to the setup and use of Host TX descriptors.
10335          */
10336         err = tg3_alloc_consistent(tp);
10337         if (err)
10338                 goto err_out1;
10339
10340         tg3_napi_init(tp);
10341
10342         tg3_napi_enable(tp);
10343
10344         for (i = 0; i < tp->irq_cnt; i++) {
10345                 struct tg3_napi *tnapi = &tp->napi[i];
10346                 err = tg3_request_irq(tp, i);
10347                 if (err) {
10348                         for (i--; i >= 0; i--) {
10349                                 tnapi = &tp->napi[i];
10350                                 free_irq(tnapi->irq_vec, tnapi);
10351                         }
10352                         goto err_out2;
10353                 }
10354         }
10355
10356         tg3_full_lock(tp, 0);
10357
10358         err = tg3_init_hw(tp, reset_phy);
10359         if (err) {
10360                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10361                 tg3_free_rings(tp);
10362         }
10363
10364         tg3_full_unlock(tp);
10365
10366         if (err)
10367                 goto err_out3;
10368
10369         if (test_irq && tg3_flag(tp, USING_MSI)) {
10370                 err = tg3_test_msi(tp);
10371
10372                 if (err) {
10373                         tg3_full_lock(tp, 0);
10374                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10375                         tg3_free_rings(tp);
10376                         tg3_full_unlock(tp);
10377
10378                         goto err_out2;
10379                 }
10380
10381                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10382                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10383
10384                         tw32(PCIE_TRANSACTION_CFG,
10385                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10386                 }
10387         }
10388
10389         tg3_phy_start(tp);
10390
10391         tg3_hwmon_open(tp);
10392
10393         tg3_full_lock(tp, 0);
10394
10395         tg3_timer_start(tp);
10396         tg3_flag_set(tp, INIT_COMPLETE);
10397         tg3_enable_ints(tp);
10398
10399         tg3_full_unlock(tp);
10400
10401         netif_tx_start_all_queues(dev);
10402
10403         /*
10404          * Reset the loopback feature if it was turned on while the device
10405          * was down; make sure that it is set up properly now.
10406          */
10407         if (dev->features & NETIF_F_LOOPBACK)
10408                 tg3_set_loopback(dev, dev->features);
10409
10410         return 0;
10411
10412 err_out3:
10413         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10414                 struct tg3_napi *tnapi = &tp->napi[i];
10415                 free_irq(tnapi->irq_vec, tnapi);
10416         }
10417
10418 err_out2:
10419         tg3_napi_disable(tp);
10420         tg3_napi_fini(tp);
10421         tg3_free_consistent(tp);
10422
10423 err_out1:
10424         tg3_ints_fini(tp);
10425
10426         return err;
10427 }
10428
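/* Tear down everything tg3_start() set up, in reverse order: cancel
 * any pending reset task, stop the netif and the timer, halt the
 * chip, free the per-vector IRQs, and release the NAPI contexts and
 * descriptor memory.
 */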
10429 static void tg3_stop(struct tg3 *tp)
10430 {
10431         int i;
10432
10433         tg3_reset_task_cancel(tp);
10434         tg3_netif_stop(tp);
10435
10436         tg3_timer_stop(tp);
10437
10438         tg3_hwmon_close(tp);
10439
10440         tg3_phy_stop(tp);
10441
10442         tg3_full_lock(tp, 1);
10443
10444         tg3_disable_ints(tp);
10445
10446         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10447         tg3_free_rings(tp);
10448         tg3_flag_clear(tp, INIT_COMPLETE);
10449
10450         tg3_full_unlock(tp);
10451
10452         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10453                 struct tg3_napi *tnapi = &tp->napi[i];
10454                 free_irq(tnapi->irq_vec, tnapi);
10455         }
10456
10457         tg3_ints_fini(tp);
10458
10459         tg3_napi_fini(tp);
10460
10461         tg3_free_consistent(tp);
10462 }
10463
10464 static int tg3_open(struct net_device *dev)
10465 {
10466         struct tg3 *tp = netdev_priv(dev);
10467         int err;
10468
10469         if (tp->fw_needed) {
10470                 err = tg3_request_firmware(tp);
10471                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10472                         if (err)
10473                                 return err;
10474                 } else if (err) {
10475                         netdev_warn(tp->dev, "TSO capability disabled\n");
10476                         tg3_flag_clear(tp, TSO_CAPABLE);
10477                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10478                         netdev_notice(tp->dev, "TSO capability restored\n");
10479                         tg3_flag_set(tp, TSO_CAPABLE);
10480                 }
10481         }
10482
10483         netif_carrier_off(tp->dev);
10484
10485         err = tg3_power_up(tp);
10486         if (err)
10487                 return err;
10488
10489         tg3_full_lock(tp, 0);
10490
10491         tg3_disable_ints(tp);
10492         tg3_flag_clear(tp, INIT_COMPLETE);
10493
10494         tg3_full_unlock(tp);
10495
10496         err = tg3_start(tp, true, true);
10497         if (err) {
10498                 tg3_frob_aux_power(tp, false);
10499                 pci_set_power_state(tp->pdev, PCI_D3hot);
10500         }
10501         return err;
10502 }
10503
10504 static int tg3_close(struct net_device *dev)
10505 {
10506         struct tg3 *tp = netdev_priv(dev);
10507
10508         tg3_stop(tp);
10509
10510         /* Clear stats across close / open calls */
10511         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10512         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10513
10514         tg3_power_down(tp);
10515
10516         netif_carrier_off(tp->dev);
10517
10518         return 0;
10519 }
10520
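/* Hardware counters are maintained as two 32-bit halves; fold them
 * into a single u64.
 */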
10521 static inline u64 get_stat64(tg3_stat64_t *val)
10522 {
10523         return ((u64)val->high << 32) | ((u64)val->low);
10524 }
10525
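/* On 5700/5701 with a copper PHY, CRC errors are accumulated from the
 * PHY test counter (MII_TG3_TEST1 / MII_TG3_RXR_COUNTERS); all other
 * configurations report the MAC's rx_fcs_errors counter.
 */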
10526 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10527 {
10528         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10529
10530         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10531             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10532              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10533                 u32 val;
10534
10535                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10536                         tg3_writephy(tp, MII_TG3_TEST1,
10537                                      val | MII_TG3_TEST1_CRC_EN);
10538                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10539                 } else
10540                         val = 0;
10541
10542                 tp->phy_crc_errors += val;
10543
10544                 return tp->phy_crc_errors;
10545         }
10546
10547         return get_stat64(&hw_stats->rx_fcs_errors);
10548 }
10549
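/* An ethtool statistic is the total saved in estats_prev (accumulated
 * before the last chip reset) plus the live hardware counter.
 */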
10550 #define ESTAT_ADD(member) \
10551         estats->member =        old_estats->member + \
10552                                 get_stat64(&hw_stats->member)
10553
10554 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10555 {
10556         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10557         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10558
10559         ESTAT_ADD(rx_octets);
10560         ESTAT_ADD(rx_fragments);
10561         ESTAT_ADD(rx_ucast_packets);
10562         ESTAT_ADD(rx_mcast_packets);
10563         ESTAT_ADD(rx_bcast_packets);
10564         ESTAT_ADD(rx_fcs_errors);
10565         ESTAT_ADD(rx_align_errors);
10566         ESTAT_ADD(rx_xon_pause_rcvd);
10567         ESTAT_ADD(rx_xoff_pause_rcvd);
10568         ESTAT_ADD(rx_mac_ctrl_rcvd);
10569         ESTAT_ADD(rx_xoff_entered);
10570         ESTAT_ADD(rx_frame_too_long_errors);
10571         ESTAT_ADD(rx_jabbers);
10572         ESTAT_ADD(rx_undersize_packets);
10573         ESTAT_ADD(rx_in_length_errors);
10574         ESTAT_ADD(rx_out_length_errors);
10575         ESTAT_ADD(rx_64_or_less_octet_packets);
10576         ESTAT_ADD(rx_65_to_127_octet_packets);
10577         ESTAT_ADD(rx_128_to_255_octet_packets);
10578         ESTAT_ADD(rx_256_to_511_octet_packets);
10579         ESTAT_ADD(rx_512_to_1023_octet_packets);
10580         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10581         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10582         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10583         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10584         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10585
10586         ESTAT_ADD(tx_octets);
10587         ESTAT_ADD(tx_collisions);
10588         ESTAT_ADD(tx_xon_sent);
10589         ESTAT_ADD(tx_xoff_sent);
10590         ESTAT_ADD(tx_flow_control);
10591         ESTAT_ADD(tx_mac_errors);
10592         ESTAT_ADD(tx_single_collisions);
10593         ESTAT_ADD(tx_mult_collisions);
10594         ESTAT_ADD(tx_deferred);
10595         ESTAT_ADD(tx_excessive_collisions);
10596         ESTAT_ADD(tx_late_collisions);
10597         ESTAT_ADD(tx_collide_2times);
10598         ESTAT_ADD(tx_collide_3times);
10599         ESTAT_ADD(tx_collide_4times);
10600         ESTAT_ADD(tx_collide_5times);
10601         ESTAT_ADD(tx_collide_6times);
10602         ESTAT_ADD(tx_collide_7times);
10603         ESTAT_ADD(tx_collide_8times);
10604         ESTAT_ADD(tx_collide_9times);
10605         ESTAT_ADD(tx_collide_10times);
10606         ESTAT_ADD(tx_collide_11times);
10607         ESTAT_ADD(tx_collide_12times);
10608         ESTAT_ADD(tx_collide_13times);
10609         ESTAT_ADD(tx_collide_14times);
10610         ESTAT_ADD(tx_collide_15times);
10611         ESTAT_ADD(tx_ucast_packets);
10612         ESTAT_ADD(tx_mcast_packets);
10613         ESTAT_ADD(tx_bcast_packets);
10614         ESTAT_ADD(tx_carrier_sense_errors);
10615         ESTAT_ADD(tx_discards);
10616         ESTAT_ADD(tx_errors);
10617
10618         ESTAT_ADD(dma_writeq_full);
10619         ESTAT_ADD(dma_write_prioq_full);
10620         ESTAT_ADD(rxbds_empty);
10621         ESTAT_ADD(rx_discards);
10622         ESTAT_ADD(rx_errors);
10623         ESTAT_ADD(rx_threshold_hit);
10624
10625         ESTAT_ADD(dma_readq_full);
10626         ESTAT_ADD(dma_read_prioq_full);
10627         ESTAT_ADD(tx_comp_queue_full);
10628
10629         ESTAT_ADD(ring_set_send_prod_index);
10630         ESTAT_ADD(ring_status_update);
10631         ESTAT_ADD(nic_irqs);
10632         ESTAT_ADD(nic_avoided_irqs);
10633         ESTAT_ADD(nic_tx_threshold_hit);
10634
10635         ESTAT_ADD(mbuf_lwm_thresh_hit);
10636 }
10637
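/* Fill a rtnl_link_stats64 by adding the live hardware counters to the
 * totals saved in net_stats_prev.  Several fields are composites, e.g.
 * tx_errors also folds in MAC, carrier-sense and discard errors.
 */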
10638 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10639 {
10640         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10641         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10642
10643         stats->rx_packets = old_stats->rx_packets +
10644                 get_stat64(&hw_stats->rx_ucast_packets) +
10645                 get_stat64(&hw_stats->rx_mcast_packets) +
10646                 get_stat64(&hw_stats->rx_bcast_packets);
10647
10648         stats->tx_packets = old_stats->tx_packets +
10649                 get_stat64(&hw_stats->tx_ucast_packets) +
10650                 get_stat64(&hw_stats->tx_mcast_packets) +
10651                 get_stat64(&hw_stats->tx_bcast_packets);
10652
10653         stats->rx_bytes = old_stats->rx_bytes +
10654                 get_stat64(&hw_stats->rx_octets);
10655         stats->tx_bytes = old_stats->tx_bytes +
10656                 get_stat64(&hw_stats->tx_octets);
10657
10658         stats->rx_errors = old_stats->rx_errors +
10659                 get_stat64(&hw_stats->rx_errors);
10660         stats->tx_errors = old_stats->tx_errors +
10661                 get_stat64(&hw_stats->tx_errors) +
10662                 get_stat64(&hw_stats->tx_mac_errors) +
10663                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10664                 get_stat64(&hw_stats->tx_discards);
10665
10666         stats->multicast = old_stats->multicast +
10667                 get_stat64(&hw_stats->rx_mcast_packets);
10668         stats->collisions = old_stats->collisions +
10669                 get_stat64(&hw_stats->tx_collisions);
10670
10671         stats->rx_length_errors = old_stats->rx_length_errors +
10672                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10673                 get_stat64(&hw_stats->rx_undersize_packets);
10674
10675         stats->rx_over_errors = old_stats->rx_over_errors +
10676                 get_stat64(&hw_stats->rxbds_empty);
10677         stats->rx_frame_errors = old_stats->rx_frame_errors +
10678                 get_stat64(&hw_stats->rx_align_errors);
10679         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10680                 get_stat64(&hw_stats->tx_discards);
10681         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10682                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10683
10684         stats->rx_crc_errors = old_stats->rx_crc_errors +
10685                 tg3_calc_crc_errors(tp);
10686
10687         stats->rx_missed_errors = old_stats->rx_missed_errors +
10688                 get_stat64(&hw_stats->rx_discards);
10689
10690         stats->rx_dropped = tp->rx_dropped;
10691         stats->tx_dropped = tp->tx_dropped;
10692 }
10693
10694 static int tg3_get_regs_len(struct net_device *dev)
10695 {
10696         return TG3_REG_BLK_SIZE;
10697 }
10698
10699 static void tg3_get_regs(struct net_device *dev,
10700                 struct ethtool_regs *regs, void *_p)
10701 {
10702         struct tg3 *tp = netdev_priv(dev);
10703
10704         regs->version = 0;
10705
10706         memset(_p, 0, TG3_REG_BLK_SIZE);
10707
10708         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10709                 return;
10710
10711         tg3_full_lock(tp, 0);
10712
10713         tg3_dump_legacy_regs(tp, (u32 *)_p);
10714
10715         tg3_full_unlock(tp);
10716 }
10717
10718 static int tg3_get_eeprom_len(struct net_device *dev)
10719 {
10720         struct tg3 *tp = netdev_priv(dev);
10721
10722         return tp->nvram_size;
10723 }
10724
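/* NVRAM is read as big-endian 32-bit words, so an arbitrary ethtool
 * EEPROM request is split into an unaligned head, a run of whole
 * words, and an unaligned tail.
 */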
10725 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10726 {
10727         struct tg3 *tp = netdev_priv(dev);
10728         int ret;
10729         u8  *pd;
10730         u32 i, offset, len, b_offset, b_count;
10731         __be32 val;
10732
10733         if (tg3_flag(tp, NO_NVRAM))
10734                 return -EINVAL;
10735
10736         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10737                 return -EAGAIN;
10738
10739         offset = eeprom->offset;
10740         len = eeprom->len;
10741         eeprom->len = 0;
10742
10743         eeprom->magic = TG3_EEPROM_MAGIC;
10744
10745         if (offset & 3) {
10746                 /* adjustments to start on required 4 byte boundary */
10747                 b_offset = offset & 3;
10748                 b_count = 4 - b_offset;
10749                 if (b_count > len) {
10750                         /* i.e. offset=1 len=2 */
10751                         b_count = len;
10752                 }
10753                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10754                 if (ret)
10755                         return ret;
10756                 memcpy(data, ((char *)&val) + b_offset, b_count);
10757                 len -= b_count;
10758                 offset += b_count;
10759                 eeprom->len += b_count;
10760         }
10761
10762         /* read bytes up to the last 4 byte boundary */
10763         pd = &data[eeprom->len];
10764         for (i = 0; i < (len - (len & 3)); i += 4) {
10765                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10766                 if (ret) {
10767                         eeprom->len += i;
10768                         return ret;
10769                 }
10770                 memcpy(pd + i, &val, 4);
10771         }
10772         eeprom->len += i;
10773
10774         if (len & 3) {
10775                 /* read last bytes not ending on 4 byte boundary */
10776                 pd = &data[eeprom->len];
10777                 b_count = len & 3;
10778                 b_offset = offset + len - b_count;
10779                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10780                 if (ret)
10781                         return ret;
10782                 memcpy(pd, &val, b_count);
10783                 eeprom->len += b_count;
10784         }
10785         return 0;
10786 }
10787
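/* NVRAM writes must be word-aligned too: for unaligned head or tail
 * bytes the neighbouring words are read first and the user data is
 * spliced into a scratch buffer before the block is programmed.
 */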
10788 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10789 {
10790         struct tg3 *tp = netdev_priv(dev);
10791         int ret;
10792         u32 offset, len, b_offset, odd_len;
10793         u8 *buf;
10794         __be32 start, end;
10795
10796         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10797                 return -EAGAIN;
10798
10799         if (tg3_flag(tp, NO_NVRAM) ||
10800             eeprom->magic != TG3_EEPROM_MAGIC)
10801                 return -EINVAL;
10802
10803         offset = eeprom->offset;
10804         len = eeprom->len;
10805
10806         if ((b_offset = (offset & 3))) {
10807                 /* adjustments to start on required 4 byte boundary */
10808                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10809                 if (ret)
10810                         return ret;
10811                 len += b_offset;
10812                 offset &= ~3;
10813                 if (len < 4)
10814                         len = 4;
10815         }
10816
10817         odd_len = 0;
10818         if (len & 3) {
10819                 /* adjustments to end on required 4 byte boundary */
10820                 odd_len = 1;
10821                 len = (len + 3) & ~3;
10822                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10823                 if (ret)
10824                         return ret;
10825         }
10826
10827         buf = data;
10828         if (b_offset || odd_len) {
10829                 buf = kmalloc(len, GFP_KERNEL);
10830                 if (!buf)
10831                         return -ENOMEM;
10832                 if (b_offset)
10833                         memcpy(buf, &start, 4);
10834                 if (odd_len)
10835                         memcpy(buf+len-4, &end, 4);
10836                 memcpy(buf + b_offset, data, eeprom->len);
10837         }
10838
10839         ret = tg3_nvram_write_block(tp, offset, len, buf);
10840
10841         if (buf != data)
10842                 kfree(buf);
10843
10844         return ret;
10845 }
10846
10847 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10848 {
10849         struct tg3 *tp = netdev_priv(dev);
10850
10851         if (tg3_flag(tp, USE_PHYLIB)) {
10852                 struct phy_device *phydev;
10853                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10854                         return -EAGAIN;
10855                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10856                 return phy_ethtool_gset(phydev, cmd);
10857         }
10858
10859         cmd->supported = (SUPPORTED_Autoneg);
10860
10861         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10862                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10863                                    SUPPORTED_1000baseT_Full);
10864
10865         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10866                 cmd->supported |= (SUPPORTED_100baseT_Half |
10867                                   SUPPORTED_100baseT_Full |
10868                                   SUPPORTED_10baseT_Half |
10869                                   SUPPORTED_10baseT_Full |
10870                                   SUPPORTED_TP);
10871                 cmd->port = PORT_TP;
10872         } else {
10873                 cmd->supported |= SUPPORTED_FIBRE;
10874                 cmd->port = PORT_FIBRE;
10875         }
10876
10877         cmd->advertising = tp->link_config.advertising;
10878         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10879                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10880                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10881                                 cmd->advertising |= ADVERTISED_Pause;
10882                         } else {
10883                                 cmd->advertising |= ADVERTISED_Pause |
10884                                                     ADVERTISED_Asym_Pause;
10885                         }
10886                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10887                         cmd->advertising |= ADVERTISED_Asym_Pause;
10888                 }
10889         }
10890         if (netif_running(dev) && netif_carrier_ok(dev)) {
10891                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10892                 cmd->duplex = tp->link_config.active_duplex;
10893                 cmd->lp_advertising = tp->link_config.rmt_adv;
10894                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10895                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10896                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10897                         else
10898                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10899                 }
10900         } else {
10901                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10902                 cmd->duplex = DUPLEX_UNKNOWN;
10903                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10904         }
10905         cmd->phy_address = tp->phy_addr;
10906         cmd->transceiver = XCVR_INTERNAL;
10907         cmd->autoneg = tp->link_config.autoneg;
10908         cmd->maxtxpkt = 0;
10909         cmd->maxrxpkt = 0;
10910         return 0;
10911 }
10912
10913 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10914 {
10915         struct tg3 *tp = netdev_priv(dev);
10916         u32 speed = ethtool_cmd_speed(cmd);
10917
10918         if (tg3_flag(tp, USE_PHYLIB)) {
10919                 struct phy_device *phydev;
10920                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10921                         return -EAGAIN;
10922                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10923                 return phy_ethtool_sset(phydev, cmd);
10924         }
10925
10926         if (cmd->autoneg != AUTONEG_ENABLE &&
10927             cmd->autoneg != AUTONEG_DISABLE)
10928                 return -EINVAL;
10929
10930         if (cmd->autoneg == AUTONEG_DISABLE &&
10931             cmd->duplex != DUPLEX_FULL &&
10932             cmd->duplex != DUPLEX_HALF)
10933                 return -EINVAL;
10934
10935         if (cmd->autoneg == AUTONEG_ENABLE) {
10936                 u32 mask = ADVERTISED_Autoneg |
10937                            ADVERTISED_Pause |
10938                            ADVERTISED_Asym_Pause;
10939
10940                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10941                         mask |= ADVERTISED_1000baseT_Half |
10942                                 ADVERTISED_1000baseT_Full;
10943
10944                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10945                         mask |= ADVERTISED_100baseT_Half |
10946                                 ADVERTISED_100baseT_Full |
10947                                 ADVERTISED_10baseT_Half |
10948                                 ADVERTISED_10baseT_Full |
10949                                 ADVERTISED_TP;
10950                 else
10951                         mask |= ADVERTISED_FIBRE;
10952
10953                 if (cmd->advertising & ~mask)
10954                         return -EINVAL;
10955
10956                 mask &= (ADVERTISED_1000baseT_Half |
10957                          ADVERTISED_1000baseT_Full |
10958                          ADVERTISED_100baseT_Half |
10959                          ADVERTISED_100baseT_Full |
10960                          ADVERTISED_10baseT_Half |
10961                          ADVERTISED_10baseT_Full);
10962
10963                 cmd->advertising &= mask;
10964         } else {
10965                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10966                         if (speed != SPEED_1000)
10967                                 return -EINVAL;
10968
10969                         if (cmd->duplex != DUPLEX_FULL)
10970                                 return -EINVAL;
10971                 } else {
10972                         if (speed != SPEED_100 &&
10973                             speed != SPEED_10)
10974                                 return -EINVAL;
10975                 }
10976         }
10977
10978         tg3_full_lock(tp, 0);
10979
10980         tp->link_config.autoneg = cmd->autoneg;
10981         if (cmd->autoneg == AUTONEG_ENABLE) {
10982                 tp->link_config.advertising = (cmd->advertising |
10983                                               ADVERTISED_Autoneg);
10984                 tp->link_config.speed = SPEED_UNKNOWN;
10985                 tp->link_config.duplex = DUPLEX_UNKNOWN;
10986         } else {
10987                 tp->link_config.advertising = 0;
10988                 tp->link_config.speed = speed;
10989                 tp->link_config.duplex = cmd->duplex;
10990         }
10991
10992         if (netif_running(dev))
10993                 tg3_setup_phy(tp, 1);
10994
10995         tg3_full_unlock(tp);
10996
10997         return 0;
10998 }
10999
11000 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11001 {
11002         struct tg3 *tp = netdev_priv(dev);
11003
11004         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11005         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11006         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11007         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11008 }
11009
11010 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11011 {
11012         struct tg3 *tp = netdev_priv(dev);
11013
11014         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11015                 wol->supported = WAKE_MAGIC;
11016         else
11017                 wol->supported = 0;
11018         wol->wolopts = 0;
11019         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11020                 wol->wolopts = WAKE_MAGIC;
11021         memset(&wol->sopass, 0, sizeof(wol->sopass));
11022 }
11023
11024 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11025 {
11026         struct tg3 *tp = netdev_priv(dev);
11027         struct device *dp = &tp->pdev->dev;
11028
11029         if (wol->wolopts & ~WAKE_MAGIC)
11030                 return -EINVAL;
11031         if ((wol->wolopts & WAKE_MAGIC) &&
11032             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11033                 return -EINVAL;
11034
11035         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11036
11037         spin_lock_bh(&tp->lock);
11038         if (device_may_wakeup(dp))
11039                 tg3_flag_set(tp, WOL_ENABLE);
11040         else
11041                 tg3_flag_clear(tp, WOL_ENABLE);
11042         spin_unlock_bh(&tp->lock);
11043
11044         return 0;
11045 }
11046
11047 static u32 tg3_get_msglevel(struct net_device *dev)
11048 {
11049         struct tg3 *tp = netdev_priv(dev);
11050         return tp->msg_enable;
11051 }
11052
11053 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11054 {
11055         struct tg3 *tp = netdev_priv(dev);
11056         tp->msg_enable = value;
11057 }
11058
11059 static int tg3_nway_reset(struct net_device *dev)
11060 {
11061         struct tg3 *tp = netdev_priv(dev);
11062         int r;
11063
11064         if (!netif_running(dev))
11065                 return -EAGAIN;
11066
11067         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11068                 return -EINVAL;
11069
11070         if (tg3_flag(tp, USE_PHYLIB)) {
11071                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11072                         return -EAGAIN;
11073                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11074         } else {
11075                 u32 bmcr;
11076
11077                 spin_lock_bh(&tp->lock);
11078                 r = -EINVAL;
11079                 tg3_readphy(tp, MII_BMCR, &bmcr);
11080                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11081                     ((bmcr & BMCR_ANENABLE) ||
11082                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11083                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11084                                                    BMCR_ANENABLE);
11085                         r = 0;
11086                 }
11087                 spin_unlock_bh(&tp->lock);
11088         }
11089
11090         return r;
11091 }
11092
11093 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11094 {
11095         struct tg3 *tp = netdev_priv(dev);
11096
11097         ering->rx_max_pending = tp->rx_std_ring_mask;
11098         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11099                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11100         else
11101                 ering->rx_jumbo_max_pending = 0;
11102
11103         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11104
11105         ering->rx_pending = tp->rx_pending;
11106         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11107                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11108         else
11109                 ering->rx_jumbo_pending = 0;
11110
11111         ering->tx_pending = tp->napi[0].tx_pending;
11112 }
11113
11114 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11115 {
11116         struct tg3 *tp = netdev_priv(dev);
11117         int i, irq_sync = 0, err = 0;
11118
11119         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11120             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11121             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11122             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11123             (tg3_flag(tp, TSO_BUG) &&
11124              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11125                 return -EINVAL;
11126
11127         if (netif_running(dev)) {
11128                 tg3_phy_stop(tp);
11129                 tg3_netif_stop(tp);
11130                 irq_sync = 1;
11131         }
11132
11133         tg3_full_lock(tp, irq_sync);
11134
11135         tp->rx_pending = ering->rx_pending;
11136
11137         if (tg3_flag(tp, MAX_RXPEND_64) &&
11138             tp->rx_pending > 63)
11139                 tp->rx_pending = 63;
11140         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11141
11142         for (i = 0; i < tp->irq_max; i++)
11143                 tp->napi[i].tx_pending = ering->tx_pending;
11144
11145         if (netif_running(dev)) {
11146                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11147                 err = tg3_restart_hw(tp, 1);
11148                 if (!err)
11149                         tg3_netif_start(tp);
11150         }
11151
11152         tg3_full_unlock(tp);
11153
11154         if (irq_sync && !err)
11155                 tg3_phy_start(tp);
11156
11157         return err;
11158 }
11159
11160 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11161 {
11162         struct tg3 *tp = netdev_priv(dev);
11163
11164         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11165
11166         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11167                 epause->rx_pause = 1;
11168         else
11169                 epause->rx_pause = 0;
11170
11171         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11172                 epause->tx_pause = 1;
11173         else
11174                 epause->tx_pause = 0;
11175 }
11176
11177 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11178 {
11179         struct tg3 *tp = netdev_priv(dev);
11180         int err = 0;
11181
11182         if (tg3_flag(tp, USE_PHYLIB)) {
11183                 u32 newadv;
11184                 struct phy_device *phydev;
11185
11186                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11187
11188                 if (!(phydev->supported & SUPPORTED_Pause) ||
11189                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11190                      (epause->rx_pause != epause->tx_pause)))
11191                         return -EINVAL;
11192
11193                 tp->link_config.flowctrl = 0;
11194                 if (epause->rx_pause) {
11195                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11196
11197                         if (epause->tx_pause) {
11198                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11199                                 newadv = ADVERTISED_Pause;
11200                         } else
11201                                 newadv = ADVERTISED_Pause |
11202                                          ADVERTISED_Asym_Pause;
11203                 } else if (epause->tx_pause) {
11204                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11205                         newadv = ADVERTISED_Asym_Pause;
11206                 } else
11207                         newadv = 0;
11208
11209                 if (epause->autoneg)
11210                         tg3_flag_set(tp, PAUSE_AUTONEG);
11211                 else
11212                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11213
11214                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11215                         u32 oldadv = phydev->advertising &
11216                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11217                         if (oldadv != newadv) {
11218                                 phydev->advertising &=
11219                                         ~(ADVERTISED_Pause |
11220                                           ADVERTISED_Asym_Pause);
11221                                 phydev->advertising |= newadv;
11222                                 if (phydev->autoneg) {
11223                                         /*
11224                                          * Always renegotiate the link to
11225                                          * inform our link partner of our
11226                                          * flow control settings, even if the
11227                                          * flow control is forced.  Let
11228                                          * tg3_adjust_link() do the final
11229                                          * flow control setup.
11230                                          */
11231                                         return phy_start_aneg(phydev);
11232                                 }
11233                         }
11234
11235                         if (!epause->autoneg)
11236                                 tg3_setup_flow_control(tp, 0, 0);
11237                 } else {
11238                         tp->link_config.advertising &=
11239                                         ~(ADVERTISED_Pause |
11240                                           ADVERTISED_Asym_Pause);
11241                         tp->link_config.advertising |= newadv;
11242                 }
11243         } else {
11244                 int irq_sync = 0;
11245
11246                 if (netif_running(dev)) {
11247                         tg3_netif_stop(tp);
11248                         irq_sync = 1;
11249                 }
11250
11251                 tg3_full_lock(tp, irq_sync);
11252
11253                 if (epause->autoneg)
11254                         tg3_flag_set(tp, PAUSE_AUTONEG);
11255                 else
11256                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11257                 if (epause->rx_pause)
11258                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11259                 else
11260                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11261                 if (epause->tx_pause)
11262                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11263                 else
11264                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11265
11266                 if (netif_running(dev)) {
11267                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11268                         err = tg3_restart_hw(tp, 1);
11269                         if (!err)
11270                                 tg3_netif_start(tp);
11271                 }
11272
11273                 tg3_full_unlock(tp);
11274         }
11275
11276         return err;
11277 }
11278
11279 static int tg3_get_sset_count(struct net_device *dev, int sset)
11280 {
11281         switch (sset) {
11282         case ETH_SS_TEST:
11283                 return TG3_NUM_TEST;
11284         case ETH_SS_STATS:
11285                 return TG3_NUM_STATS;
11286         default:
11287                 return -EOPNOTSUPP;
11288         }
11289 }
11290
11291 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11292                          u32 *rules __always_unused)
11293 {
11294         struct tg3 *tp = netdev_priv(dev);
11295
11296         if (!tg3_flag(tp, SUPPORT_MSIX))
11297                 return -EOPNOTSUPP;
11298
11299         switch (info->cmd) {
11300         case ETHTOOL_GRXRINGS:
11301                 if (netif_running(tp->dev))
11302                         info->data = tp->rxq_cnt;
11303                 else {
11304                         info->data = num_online_cpus();
11305                         if (info->data > TG3_RSS_MAX_NUM_QS)
11306                                 info->data = TG3_RSS_MAX_NUM_QS;
11307                 }
11308
11309                 /* The first interrupt vector only
11310                  * handles link interrupts.
11311                  */
11312                 info->data -= 1;
11313                 return 0;
11314
11315         default:
11316                 return -EOPNOTSUPP;
11317         }
11318 }
11319
11320 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11321 {
11322         u32 size = 0;
11323         struct tg3 *tp = netdev_priv(dev);
11324
11325         if (tg3_flag(tp, SUPPORT_MSIX))
11326                 size = TG3_RSS_INDIR_TBL_SIZE;
11327
11328         return size;
11329 }
11330
11331 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11332 {
11333         struct tg3 *tp = netdev_priv(dev);
11334         int i;
11335
11336         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11337                 indir[i] = tp->rss_ind_tbl[i];
11338
11339         return 0;
11340 }
11341
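      /* ETHTOOL_SRXFHINDIR (ethtool -X) handler.  The cached copy in
       * tp->rss_ind_tbl is always updated; the hardware copy is only
       * rewritten while the device is up with RSS enabled.
       */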
11342 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11343 {
11344         struct tg3 *tp = netdev_priv(dev);
11345         size_t i;
11346
11347         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11348                 tp->rss_ind_tbl[i] = indir[i];
11349
11350         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11351                 return 0;
11352
11353         /* It is legal to write the indirection
11354          * table while the device is running.
11355          */
11356         tg3_full_lock(tp, 0);
11357         tg3_rss_write_indir_tbl(tp);
11358         tg3_full_unlock(tp);
11359
11360         return 0;
11361 }
11362
11363 static void tg3_get_channels(struct net_device *dev,
11364                              struct ethtool_channels *channel)
11365 {
11366         struct tg3 *tp = netdev_priv(dev);
11367         u32 deflt_qs = netif_get_num_default_rss_queues();
11368
11369         channel->max_rx = tp->rxq_max;
11370         channel->max_tx = tp->txq_max;
11371
11372         if (netif_running(dev)) {
11373                 channel->rx_count = tp->rxq_cnt;
11374                 channel->tx_count = tp->txq_cnt;
11375         } else {
11376                 if (tp->rxq_req)
11377                         channel->rx_count = tp->rxq_req;
11378                 else
11379                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11380
11381                 if (tp->txq_req)
11382                         channel->tx_count = tp->txq_req;
11383                 else
11384                         channel->tx_count = min(deflt_qs, tp->txq_max);
11385         }
11386 }
11387
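      /* ethtool -L handler.  Record the requested ring counts and, if the
       * interface is up, bounce the hardware so they take effect.
       */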
11388 static int tg3_set_channels(struct net_device *dev,
11389                             struct ethtool_channels *channel)
11390 {
11391         struct tg3 *tp = netdev_priv(dev);
11392
11393         if (!tg3_flag(tp, SUPPORT_MSIX))
11394                 return -EOPNOTSUPP;
11395
11396         if (channel->rx_count > tp->rxq_max ||
11397             channel->tx_count > tp->txq_max)
11398                 return -EINVAL;
11399
11400         tp->rxq_req = channel->rx_count;
11401         tp->txq_req = channel->tx_count;
11402
11403         if (!netif_running(dev))
11404                 return 0;
11405
11406         tg3_stop(tp);
11407
11408         netif_carrier_off(dev);
11409
11410         tg3_start(tp, true, false);
11411
11412         return 0;
11413 }
11414
11415 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11416 {
11417         switch (stringset) {
11418         case ETH_SS_STATS:
11419                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11420                 break;
11421         case ETH_SS_TEST:
11422                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11423                 break;
11424         default:
11425                 WARN_ON(1);     /* unknown stringset; should never be reached */
11426                 break;
11427         }
11428 }
11429
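      /* ethtool -p (identify) handler.  Returning 1 for ETHTOOL_ID_ACTIVE
       * asks the ethtool core to toggle the LED state at one on/off cycle
       * per second until the user interrupts the command.
       */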
11430 static int tg3_set_phys_id(struct net_device *dev,
11431                             enum ethtool_phys_id_state state)
11432 {
11433         struct tg3 *tp = netdev_priv(dev);
11434
11435         if (!netif_running(tp->dev))
11436                 return -EAGAIN;
11437
11438         switch (state) {
11439         case ETHTOOL_ID_ACTIVE:
11440                 return 1;       /* cycle on/off once per second */
11441
11442         case ETHTOOL_ID_ON:
11443                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11444                      LED_CTRL_1000MBPS_ON |
11445                      LED_CTRL_100MBPS_ON |
11446                      LED_CTRL_10MBPS_ON |
11447                      LED_CTRL_TRAFFIC_OVERRIDE |
11448                      LED_CTRL_TRAFFIC_BLINK |
11449                      LED_CTRL_TRAFFIC_LED);
11450                 break;
11451
11452         case ETHTOOL_ID_OFF:
11453                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11454                      LED_CTRL_TRAFFIC_OVERRIDE);
11455                 break;
11456
11457         case ETHTOOL_ID_INACTIVE:
11458                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11459                 break;
11460         }
11461
11462         return 0;
11463 }
11464
11465 static void tg3_get_ethtool_stats(struct net_device *dev,
11466                                    struct ethtool_stats *estats, u64 *tmp_stats)
11467 {
11468         struct tg3 *tp = netdev_priv(dev);
11469
11470         if (tp->hw_stats)
11471                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11472         else
11473                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11474 }
11475
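      /* Locate and read the VPD block.  When the standard EEPROM magic is
       * present, the NVRAM directory is scanned for an extended-VPD entry
       * (falling back to the fixed offset/length) and the block is read
       * through the NVRAM interface; otherwise it is fetched through the
       * PCI VPD capability.
       */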
11476 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11477 {
11478         int i;
11479         __be32 *buf;
11480         u32 offset = 0, len = 0;
11481         u32 magic, val;
11482
11483         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11484                 return NULL;
11485
11486         if (magic == TG3_EEPROM_MAGIC) {
11487                 for (offset = TG3_NVM_DIR_START;
11488                      offset < TG3_NVM_DIR_END;
11489                      offset += TG3_NVM_DIRENT_SIZE) {
11490                         if (tg3_nvram_read(tp, offset, &val))
11491                                 return NULL;
11492
11493                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11494                             TG3_NVM_DIRTYPE_EXTVPD)
11495                                 break;
11496                 }
11497
11498                 if (offset != TG3_NVM_DIR_END) {
11499                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11500                         if (tg3_nvram_read(tp, offset + 4, &offset))
11501                                 return NULL;
11502
11503                         offset = tg3_nvram_logical_addr(tp, offset);
11504                 }
11505         }
11506
11507         if (!offset || !len) {
11508                 offset = TG3_NVM_VPD_OFF;
11509                 len = TG3_NVM_VPD_LEN;
11510         }
11511
11512         buf = kmalloc(len, GFP_KERNEL);
11513         if (buf == NULL)
11514                 return NULL;
11515
11516         if (magic == TG3_EEPROM_MAGIC) {
11517                 for (i = 0; i < len; i += 4) {
11518                         /* The data is in little-endian format in NVRAM.
11519                          * Use the big-endian read routines to preserve
11520                          * the byte order as it exists in NVRAM.
11521                          */
11522                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11523                                 goto error;
11524                 }
11525         } else {
11526                 u8 *ptr;
11527                 ssize_t cnt;
11528                 unsigned int pos = 0;
11529
11530                 ptr = (u8 *)&buf[0];
11531                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11532                         cnt = pci_read_vpd(tp->pdev, pos,
11533                                            len - pos, ptr);
11534                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11535                                 cnt = 0;
11536                         else if (cnt < 0)
11537                                 goto error;
11538                 }
11539                 if (pos != len)
11540                         goto error;
11541         }
11542
11543         *vpdlen = len;
11544
11545         return buf;
11546
11547 error:
11548         kfree(buf);
11549         return NULL;
11550 }
11551
11552 #define NVRAM_TEST_SIZE 0x100
11553 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11554 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11555 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11556 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11557 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11558 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11559 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11560 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11561
11562 static int tg3_test_nvram(struct tg3 *tp)
11563 {
11564         u32 csum, magic, len;
11565         __be32 *buf;
11566         int i, j, k, err = 0, size;
11567
11568         if (tg3_flag(tp, NO_NVRAM))
11569                 return 0;
11570
11571         if (tg3_nvram_read(tp, 0, &magic) != 0)
11572                 return -EIO;
11573
11574         if (magic == TG3_EEPROM_MAGIC)
11575                 size = NVRAM_TEST_SIZE;
11576         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11577                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11578                     TG3_EEPROM_SB_FORMAT_1) {
11579                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11580                         case TG3_EEPROM_SB_REVISION_0:
11581                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11582                                 break;
11583                         case TG3_EEPROM_SB_REVISION_2:
11584                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11585                                 break;
11586                         case TG3_EEPROM_SB_REVISION_3:
11587                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11588                                 break;
11589                         case TG3_EEPROM_SB_REVISION_4:
11590                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11591                                 break;
11592                         case TG3_EEPROM_SB_REVISION_5:
11593                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11594                                 break;
11595                         case TG3_EEPROM_SB_REVISION_6:
11596                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11597                                 break;
11598                         default:
11599                                 return -EIO;
11600                         }
11601                 } else
11602                         return 0;
11603         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11604                 size = NVRAM_SELFBOOT_HW_SIZE;
11605         else
11606                 return -EIO;
11607
11608         buf = kmalloc(size, GFP_KERNEL);
11609         if (buf == NULL)
11610                 return -ENOMEM;
11611
11612         err = -EIO;
11613         for (i = 0, j = 0; i < size; i += 4, j++) {
11614                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11615                 if (err)
11616                         break;
11617         }
11618         if (i < size)
11619                 goto out;
11620
11621         /* Selfboot format */
11622         magic = be32_to_cpu(buf[0]);
11623         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11624             TG3_EEPROM_MAGIC_FW) {
11625                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11626
11627                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11628                     TG3_EEPROM_SB_REVISION_2) {
11629                         /* For rev 2, the csum doesn't include the MBA. */
11630                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11631                                 csum8 += buf8[i];
11632                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11633                                 csum8 += buf8[i];
11634                 } else {
11635                         for (i = 0; i < size; i++)
11636                                 csum8 += buf8[i];
11637                 }
11638
11639                 if (csum8 == 0) {
11640                         err = 0;
11641                         goto out;
11642                 }
11643
11644                 err = -EIO;
11645                 goto out;
11646         }
11647
11648         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11649             TG3_EEPROM_MAGIC_HW) {
11650                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11651                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11652                 u8 *buf8 = (u8 *) buf;
11653
11654                 /* Separate the parity bits and the data bytes.  */
11655                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11656                         if ((i == 0) || (i == 8)) {
11657                                 int l;
11658                                 u8 msk;
11659
11660                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11661                                         parity[k++] = buf8[i] & msk;
11662                                 i++;
11663                         } else if (i == 16) {
11664                                 int l;
11665                                 u8 msk;
11666
11667                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11668                                         parity[k++] = buf8[i] & msk;
11669                                 i++;
11670
11671                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11672                                         parity[k++] = buf8[i] & msk;
11673                                 i++;
11674                         }
11675                         data[j++] = buf8[i];
11676                 }
11677
11678                 err = -EIO;
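                      /* Odd-parity check: a data byte with an even number of
                       * 1 bits must have its parity bit set, and an odd-weight
                       * byte must have it clear.
                       */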
11679                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11680                         u8 hw8 = hweight8(data[i]);
11681
11682                         if ((hw8 & 0x1) && parity[i])
11683                                 goto out;
11684                         else if (!(hw8 & 0x1) && !parity[i])
11685                                 goto out;
11686                 }
11687                 err = 0;
11688                 goto out;
11689         }
11690
11691         err = -EIO;
11692
11693         /* Bootstrap checksum at offset 0x10 */
11694         csum = calc_crc((unsigned char *) buf, 0x10);
11695         if (csum != le32_to_cpu(buf[0x10/4]))
11696                 goto out;
11697
11698         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11699         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11700         if (csum != le32_to_cpu(buf[0xfc/4]))
11701                 goto out;
11702
11703         kfree(buf);
11704
11705         buf = tg3_vpd_readblock(tp, &len);
11706         if (!buf)
11707                 return -ENOMEM;
11708
11709         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11710         if (i > 0) {
11711                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11712                 if (j < 0)
11713                         goto out;
11714
11715                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11716                         goto out;
11717
11718                 i += PCI_VPD_LRDT_TAG_SIZE;
11719                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11720                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11721                 if (j > 0) {
11722                         u8 csum8 = 0;
11723
11724                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11725
11726                         for (i = 0; i <= j; i++)
11727                                 csum8 += ((u8 *)buf)[i];
11728
11729                         if (csum8)
11730                                 goto out;
11731                 }
11732         }
11733
11734         err = 0;
11735
11736 out:
11737         kfree(buf);
11738         return err;
11739 }
11740
11741 #define TG3_SERDES_TIMEOUT_SEC  2
11742 #define TG3_COPPER_TIMEOUT_SEC  6
11743
11744 static int tg3_test_link(struct tg3 *tp)
11745 {
11746         int i, max;
11747
11748         if (!netif_running(tp->dev))
11749                 return -ENODEV;
11750
11751         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11752                 max = TG3_SERDES_TIMEOUT_SEC;
11753         else
11754                 max = TG3_COPPER_TIMEOUT_SEC;
11755
11756         for (i = 0; i < max; i++) {
11757                 if (netif_carrier_ok(tp->dev))
11758                         return 0;
11759
11760                 if (msleep_interruptible(1000))
11761                         break;
11762         }
11763
11764         return -EIO;
11765 }
11766
11767 /* Only test the commonly used registers */
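      /* Each reg_tbl entry gives a register offset, applicability flags,
       * a mask of read-only bits and a mask of read/write bits.  The test
       * writes all-zeros and then all-ones through the combined mask,
       * checking that read-only bits never change and that read/write
       * bits follow the written value.
       */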
11768 static int tg3_test_registers(struct tg3 *tp)
11769 {
11770         int i, is_5705, is_5750;
11771         u32 offset, read_mask, write_mask, val, save_val, read_val;
11772         static struct {
11773                 u16 offset;
11774                 u16 flags;
11775 #define TG3_FL_5705     0x1
11776 #define TG3_FL_NOT_5705 0x2
11777 #define TG3_FL_NOT_5788 0x4
11778 #define TG3_FL_NOT_5750 0x8
11779                 u32 read_mask;
11780                 u32 write_mask;
11781         } reg_tbl[] = {
11782                 /* MAC Control Registers */
11783                 { MAC_MODE, TG3_FL_NOT_5705,
11784                         0x00000000, 0x00ef6f8c },
11785                 { MAC_MODE, TG3_FL_5705,
11786                         0x00000000, 0x01ef6b8c },
11787                 { MAC_STATUS, TG3_FL_NOT_5705,
11788                         0x03800107, 0x00000000 },
11789                 { MAC_STATUS, TG3_FL_5705,
11790                         0x03800100, 0x00000000 },
11791                 { MAC_ADDR_0_HIGH, 0x0000,
11792                         0x00000000, 0x0000ffff },
11793                 { MAC_ADDR_0_LOW, 0x0000,
11794                         0x00000000, 0xffffffff },
11795                 { MAC_RX_MTU_SIZE, 0x0000,
11796                         0x00000000, 0x0000ffff },
11797                 { MAC_TX_MODE, 0x0000,
11798                         0x00000000, 0x00000070 },
11799                 { MAC_TX_LENGTHS, 0x0000,
11800                         0x00000000, 0x00003fff },
11801                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11802                         0x00000000, 0x000007fc },
11803                 { MAC_RX_MODE, TG3_FL_5705,
11804                         0x00000000, 0x000007dc },
11805                 { MAC_HASH_REG_0, 0x0000,
11806                         0x00000000, 0xffffffff },
11807                 { MAC_HASH_REG_1, 0x0000,
11808                         0x00000000, 0xffffffff },
11809                 { MAC_HASH_REG_2, 0x0000,
11810                         0x00000000, 0xffffffff },
11811                 { MAC_HASH_REG_3, 0x0000,
11812                         0x00000000, 0xffffffff },
11813
11814                 /* Receive Data and Receive BD Initiator Control Registers. */
11815                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11816                         0x00000000, 0xffffffff },
11817                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11818                         0x00000000, 0xffffffff },
11819                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11820                         0x00000000, 0x00000003 },
11821                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11822                         0x00000000, 0xffffffff },
11823                 { RCVDBDI_STD_BD+0, 0x0000,
11824                         0x00000000, 0xffffffff },
11825                 { RCVDBDI_STD_BD+4, 0x0000,
11826                         0x00000000, 0xffffffff },
11827                 { RCVDBDI_STD_BD+8, 0x0000,
11828                         0x00000000, 0xffff0002 },
11829                 { RCVDBDI_STD_BD+0xc, 0x0000,
11830                         0x00000000, 0xffffffff },
11831
11832                 /* Receive BD Initiator Control Registers. */
11833                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11834                         0x00000000, 0xffffffff },
11835                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11836                         0x00000000, 0x000003ff },
11837                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11838                         0x00000000, 0xffffffff },
11839
11840                 /* Host Coalescing Control Registers. */
11841                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11842                         0x00000000, 0x00000004 },
11843                 { HOSTCC_MODE, TG3_FL_5705,
11844                         0x00000000, 0x000000f6 },
11845                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11846                         0x00000000, 0xffffffff },
11847                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11848                         0x00000000, 0x000003ff },
11849                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11850                         0x00000000, 0xffffffff },
11851                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11852                         0x00000000, 0x000003ff },
11853                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11854                         0x00000000, 0xffffffff },
11855                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11856                         0x00000000, 0x000000ff },
11857                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11858                         0x00000000, 0xffffffff },
11859                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11860                         0x00000000, 0x000000ff },
11861                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11862                         0x00000000, 0xffffffff },
11863                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11864                         0x00000000, 0xffffffff },
11865                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11866                         0x00000000, 0xffffffff },
11867                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11868                         0x00000000, 0x000000ff },
11869                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11870                         0x00000000, 0xffffffff },
11871                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11872                         0x00000000, 0x000000ff },
11873                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11874                         0x00000000, 0xffffffff },
11875                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11876                         0x00000000, 0xffffffff },
11877                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11878                         0x00000000, 0xffffffff },
11879                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11880                         0x00000000, 0xffffffff },
11881                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11882                         0x00000000, 0xffffffff },
11883                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11884                         0xffffffff, 0x00000000 },
11885                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11886                         0xffffffff, 0x00000000 },
11887
11888                 /* Buffer Manager Control Registers. */
11889                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11890                         0x00000000, 0x007fff80 },
11891                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11892                         0x00000000, 0x007fffff },
11893                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11894                         0x00000000, 0x0000003f },
11895                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11896                         0x00000000, 0x000001ff },
11897                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11898                         0x00000000, 0x000001ff },
11899                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11900                         0xffffffff, 0x00000000 },
11901                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11902                         0xffffffff, 0x00000000 },
11903
11904                 /* Mailbox Registers */
11905                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11906                         0x00000000, 0x000001ff },
11907                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11908                         0x00000000, 0x000001ff },
11909                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11910                         0x00000000, 0x000007ff },
11911                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11912                         0x00000000, 0x000001ff },
11913
11914                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11915         };
11916
11917         is_5705 = is_5750 = 0;
11918         if (tg3_flag(tp, 5705_PLUS)) {
11919                 is_5705 = 1;
11920                 if (tg3_flag(tp, 5750_PLUS))
11921                         is_5750 = 1;
11922         }
11923
11924         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11925                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11926                         continue;
11927
11928                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11929                         continue;
11930
11931                 if (tg3_flag(tp, IS_5788) &&
11932                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11933                         continue;
11934
11935                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11936                         continue;
11937
11938                 offset = (u32) reg_tbl[i].offset;
11939                 read_mask = reg_tbl[i].read_mask;
11940                 write_mask = reg_tbl[i].write_mask;
11941
11942                 /* Save the original register content */
11943                 save_val = tr32(offset);
11944
11945                 /* Determine the read-only value. */
11946                 read_val = save_val & read_mask;
11947
11948                 /* Write zero to the register, then make sure the read-only bits
11949                  * are not changed and the read/write bits are all zeros.
11950                  */
11951                 tw32(offset, 0);
11952
11953                 val = tr32(offset);
11954
11955                 /* Test the read-only and read/write bits. */
11956                 if (((val & read_mask) != read_val) || (val & write_mask))
11957                         goto out;
11958
11959                 /* Write ones to all the bits defined by read_mask and
11960                  * write_mask, then make sure the read-only bits are not
11961                  * changed and the read/write bits are all ones.
11962                  */
11963                 tw32(offset, read_mask | write_mask);
11964
11965                 val = tr32(offset);
11966
11967                 /* Test the read-only bits. */
11968                 if ((val & read_mask) != read_val)
11969                         goto out;
11970
11971                 /* Test the read/write bits. */
11972                 if ((val & write_mask) != write_mask)
11973                         goto out;
11974
11975                 tw32(offset, save_val);
11976         }
11977
11978         return 0;
11979
11980 out:
11981         if (netif_msg_hw(tp))
11982                 netdev_err(tp->dev,
11983                            "Register test failed at offset %x\n", offset);
11984         tw32(offset, save_val);
11985         return -EIO;
11986 }
11987
11988 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11989 {
11990         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11991         int i;
11992         u32 j;
11993
11994         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11995                 for (j = 0; j < len; j += 4) {
11996                         u32 val;
11997
11998                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11999                         tg3_read_mem(tp, offset + j, &val);
12000                         if (val != test_pattern[i])
12001                                 return -EIO;
12002                 }
12003         }
12004         return 0;
12005 }
12006
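      /* Pattern-test the chip's internal SRAM.  Each table below lists
       * { offset, len } windows that are safe to scribble on for one
       * ASIC family, terminated by an offset of 0xffffffff.
       */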
12007 static int tg3_test_memory(struct tg3 *tp)
12008 {
12009         static struct mem_entry {
12010                 u32 offset;
12011                 u32 len;
12012         } mem_tbl_570x[] = {
12013                 { 0x00000000, 0x00b50},
12014                 { 0x00002000, 0x1c000},
12015                 { 0xffffffff, 0x00000}
12016         }, mem_tbl_5705[] = {
12017                 { 0x00000100, 0x0000c},
12018                 { 0x00000200, 0x00008},
12019                 { 0x00004000, 0x00800},
12020                 { 0x00006000, 0x01000},
12021                 { 0x00008000, 0x02000},
12022                 { 0x00010000, 0x0e000},
12023                 { 0xffffffff, 0x00000}
12024         }, mem_tbl_5755[] = {
12025                 { 0x00000200, 0x00008},
12026                 { 0x00004000, 0x00800},
12027                 { 0x00006000, 0x00800},
12028                 { 0x00008000, 0x02000},
12029                 { 0x00010000, 0x0c000},
12030                 { 0xffffffff, 0x00000}
12031         }, mem_tbl_5906[] = {
12032                 { 0x00000200, 0x00008},
12033                 { 0x00004000, 0x00400},
12034                 { 0x00006000, 0x00400},
12035                 { 0x00008000, 0x01000},
12036                 { 0x00010000, 0x01000},
12037                 { 0xffffffff, 0x00000}
12038         }, mem_tbl_5717[] = {
12039                 { 0x00000200, 0x00008},
12040                 { 0x00010000, 0x0a000},
12041                 { 0x00020000, 0x13c00},
12042                 { 0xffffffff, 0x00000}
12043         }, mem_tbl_57765[] = {
12044                 { 0x00000200, 0x00008},
12045                 { 0x00004000, 0x00800},
12046                 { 0x00006000, 0x09800},
12047                 { 0x00010000, 0x0a000},
12048                 { 0xffffffff, 0x00000}
12049         };
12050         struct mem_entry *mem_tbl;
12051         int err = 0;
12052         int i;
12053
12054         if (tg3_flag(tp, 5717_PLUS))
12055                 mem_tbl = mem_tbl_5717;
12056         else if (tg3_flag(tp, 57765_CLASS))
12057                 mem_tbl = mem_tbl_57765;
12058         else if (tg3_flag(tp, 5755_PLUS))
12059                 mem_tbl = mem_tbl_5755;
12060         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12061                 mem_tbl = mem_tbl_5906;
12062         else if (tg3_flag(tp, 5705_PLUS))
12063                 mem_tbl = mem_tbl_5705;
12064         else
12065                 mem_tbl = mem_tbl_570x;
12066
12067         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12068                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12069                 if (err)
12070                         break;
12071         }
12072
12073         return err;
12074 }
12075
12076 #define TG3_TSO_MSS             500
12077
12078 #define TG3_TSO_IP_HDR_LEN      20
12079 #define TG3_TSO_TCP_HDR_LEN     20
12080 #define TG3_TSO_TCP_OPT_LEN     12
12081
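      /* Canned TSO test frame header: Ethertype 0x0800, then a 20-byte
       * IPv4 header (DF set, TTL 64, protocol TCP, 10.0.0.1 -> 10.0.0.2)
       * and a 32-byte TCP header (20 bytes plus 12 bytes of timestamp
       * options).  The IP total length (and, on HW TSO chips, the TCP
       * checksum) is fixed up at run time by tg3_run_loopback().
       */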
12082 static const u8 tg3_tso_header[] = {
12083 0x08, 0x00,
12084 0x45, 0x00, 0x00, 0x00,
12085 0x00, 0x00, 0x40, 0x00,
12086 0x40, 0x06, 0x00, 0x00,
12087 0x0a, 0x00, 0x00, 0x01,
12088 0x0a, 0x00, 0x00, 0x02,
12089 0x0d, 0x00, 0xe0, 0x00,
12090 0x00, 0x00, 0x01, 0x00,
12091 0x00, 0x00, 0x02, 0x00,
12092 0x80, 0x10, 0x10, 0x00,
12093 0x14, 0x09, 0x00, 0x00,
12094 0x01, 0x01, 0x08, 0x0a,
12095 0x11, 0x11, 0x11, 0x11,
12096 0x11, 0x11, 0x11, 0x11,
12097 };
12098
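      /* Run one loopback iteration: build a test frame (optionally a TSO
       * super-frame), post it to the transmit ring, force a coalescing
       * event, poll up to 350 usec for the TX completion and the expected
       * RX packets, then verify the received payload byte-for-byte.
       */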
12099 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12100 {
12101         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12102         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12103         u32 budget;
12104         struct sk_buff *skb;
12105         u8 *tx_data, *rx_data;
12106         dma_addr_t map;
12107         int num_pkts, tx_len, rx_len, i, err;
12108         struct tg3_rx_buffer_desc *desc;
12109         struct tg3_napi *tnapi, *rnapi;
12110         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12111
12112         tnapi = &tp->napi[0];
12113         rnapi = &tp->napi[0];
12114         if (tp->irq_cnt > 1) {
12115                 if (tg3_flag(tp, ENABLE_RSS))
12116                         rnapi = &tp->napi[1];
12117                 if (tg3_flag(tp, ENABLE_TSS))
12118                         tnapi = &tp->napi[1];
12119         }
12120         coal_now = tnapi->coal_now | rnapi->coal_now;
12121
12122         err = -EIO;
12123
12124         tx_len = pktsz;
12125         skb = netdev_alloc_skb(tp->dev, tx_len);
12126         if (!skb)
12127                 return -ENOMEM;
12128
12129         tx_data = skb_put(skb, tx_len);
12130         memcpy(tx_data, tp->dev->dev_addr, 6);
12131         memset(tx_data + 6, 0x0, 8);
12132
12133         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12134
12135         if (tso_loopback) {
12136                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12137
12138                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12139                               TG3_TSO_TCP_OPT_LEN;
12140
12141                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12142                        sizeof(tg3_tso_header));
12143                 mss = TG3_TSO_MSS;
12144
12145                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12146                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12147
12148                 /* Set the total length field in the IP header */
12149                 iph->tot_len = htons((u16)(mss + hdr_len));
12150
12151                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12152                               TXD_FLAG_CPU_POST_DMA);
12153
12154                 if (tg3_flag(tp, HW_TSO_1) ||
12155                     tg3_flag(tp, HW_TSO_2) ||
12156                     tg3_flag(tp, HW_TSO_3)) {
12157                         struct tcphdr *th;
12158                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12159                         th = (struct tcphdr *)&tx_data[val];
12160                         th->check = 0;
12161                 } else
12162                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12163
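                      /* Each hardware TSO generation encodes the header length
                       * into the mss/flags descriptor fields differently;
                       * mirror the encoding used on the normal transmit path.
                       */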
12164                 if (tg3_flag(tp, HW_TSO_3)) {
12165                         mss |= (hdr_len & 0xc) << 12;
12166                         if (hdr_len & 0x10)
12167                                 base_flags |= 0x00000010;
12168                         base_flags |= (hdr_len & 0x3e0) << 5;
12169                 } else if (tg3_flag(tp, HW_TSO_2))
12170                         mss |= hdr_len << 9;
12171                 else if (tg3_flag(tp, HW_TSO_1) ||
12172                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12173                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12174                 } else {
12175                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12176                 }
12177
12178                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12179         } else {
12180                 num_pkts = 1;
12181                 data_off = ETH_HLEN;
12182
12183                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12184                     tx_len > VLAN_ETH_FRAME_LEN)
12185                         base_flags |= TXD_FLAG_JMB_PKT;
12186         }
12187
12188         for (i = data_off; i < tx_len; i++)
12189                 tx_data[i] = (u8) (i & 0xff);
12190
12191         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12192         if (pci_dma_mapping_error(tp->pdev, map)) {
12193                 dev_kfree_skb(skb);
12194                 return -EIO;
12195         }
12196
12197         val = tnapi->tx_prod;
12198         tnapi->tx_buffers[val].skb = skb;
12199         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12200
12201         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12202                rnapi->coal_now);
12203
12204         udelay(10);
12205
12206         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12207
12208         budget = tg3_tx_avail(tnapi);
12209         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12210                             base_flags | TXD_FLAG_END, mss, 0)) {
12211                 tnapi->tx_buffers[val].skb = NULL;
12212                 dev_kfree_skb(skb);
12213                 return -EIO;
12214         }
12215
12216         tnapi->tx_prod++;
12217
12218         /* Sync BD data before updating mailbox */
12219         wmb();
12220
12221         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12222         tr32_mailbox(tnapi->prodmbox);
12223
12224         udelay(10);
12225
12226         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12227         for (i = 0; i < 35; i++) {
12228                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12229                        coal_now);
12230
12231                 udelay(10);
12232
12233                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12234                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12235                 if ((tx_idx == tnapi->tx_prod) &&
12236                     (rx_idx == (rx_start_idx + num_pkts)))
12237                         break;
12238         }
12239
12240         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12241         dev_kfree_skb(skb);
12242
12243         if (tx_idx != tnapi->tx_prod)
12244                 goto out;
12245
12246         if (rx_idx != rx_start_idx + num_pkts)
12247                 goto out;
12248
12249         val = data_off;
12250         while (rx_idx != rx_start_idx) {
12251                 desc = &rnapi->rx_rcb[rx_start_idx++];
12252                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12253                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12254
12255                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12256                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12257                         goto out;
12258
12259                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12260                          - ETH_FCS_LEN;
12261
12262                 if (!tso_loopback) {
12263                         if (rx_len != tx_len)
12264                                 goto out;
12265
12266                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12267                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12268                                         goto out;
12269                         } else {
12270                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12271                                         goto out;
12272                         }
12273                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12274                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12275                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12276                         goto out;
12277                 }
12278
12279                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12280                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12281                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12282                                              mapping);
12283                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12284                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12285                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12286                                              mapping);
12287                 } else
12288                         goto out;
12289
12290                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12291                                             PCI_DMA_FROMDEVICE);
12292
12293                 rx_data += TG3_RX_OFFSET(tp);
12294                 for (i = data_off; i < rx_len; i++, val++) {
12295                         if (*(rx_data + i) != (u8) (val & 0xff))
12296                                 goto out;
12297                 }
12298         }
12299
12300         err = 0;
12301
12302         /* tg3_free_rings will unmap and free the rx_data */
12303 out:
12304         return err;
12305 }
12306
12307 #define TG3_STD_LOOPBACK_FAILED         1
12308 #define TG3_JMB_LOOPBACK_FAILED         2
12309 #define TG3_TSO_LOOPBACK_FAILED         4
12310 #define TG3_LOOPBACK_FAILED \
12311         (TG3_STD_LOOPBACK_FAILED | \
12312          TG3_JMB_LOOPBACK_FAILED | \
12313          TG3_TSO_LOOPBACK_FAILED)
12314
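      /* data[0], data[1] and data[2] accumulate the TG3_*_LOOPBACK_FAILED
       * bits for the MAC, PHY and external loopback passes respectively.
       */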
12315 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12316 {
12317         int err = -EIO;
12318         u32 eee_cap;
12319         u32 jmb_pkt_sz = 9000;
12320
12321         if (tp->dma_limit)
12322                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12323
12324         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12325         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12326
12327         if (!netif_running(tp->dev)) {
12328                 data[0] = TG3_LOOPBACK_FAILED;
12329                 data[1] = TG3_LOOPBACK_FAILED;
12330                 if (do_extlpbk)
12331                         data[2] = TG3_LOOPBACK_FAILED;
12332                 goto done;
12333         }
12334
12335         err = tg3_reset_hw(tp, 1);
12336         if (err) {
12337                 data[0] = TG3_LOOPBACK_FAILED;
12338                 data[1] = TG3_LOOPBACK_FAILED;
12339                 if (do_extlpbk)
12340                         data[2] = TG3_LOOPBACK_FAILED;
12341                 goto done;
12342         }
12343
12344         if (tg3_flag(tp, ENABLE_RSS)) {
12345                 int i;
12346
12347                 /* Reroute all rx packets to the 1st queue */
12348                 for (i = MAC_RSS_INDIR_TBL_0;
12349                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12350                         tw32(i, 0x0);
12351         }
12352
12353         /* HW errata - mac loopback fails in some cases on 5780.
12354          * Normal traffic and PHY loopback are not affected by
12355          * errata.  Also, the MAC loopback test is deprecated for
12356          * all newer ASIC revisions.
12357          */
12358         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12359             !tg3_flag(tp, CPMU_PRESENT)) {
12360                 tg3_mac_loopback(tp, true);
12361
12362                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12363                         data[0] |= TG3_STD_LOOPBACK_FAILED;
12364
12365                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12366                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12367                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
12368
12369                 tg3_mac_loopback(tp, false);
12370         }
12371
12372         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12373             !tg3_flag(tp, USE_PHYLIB)) {
12374                 int i;
12375
12376                 tg3_phy_lpbk_set(tp, 0, false);
12377
12378                 /* Wait for link */
12379                 for (i = 0; i < 100; i++) {
12380                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12381                                 break;
12382                         mdelay(1);
12383                 }
12384
12385                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12386                         data[1] |= TG3_STD_LOOPBACK_FAILED;
12387                 if (tg3_flag(tp, TSO_CAPABLE) &&
12388                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12389                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
12390                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12391                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12392                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
12393
12394                 if (do_extlpbk) {
12395                         tg3_phy_lpbk_set(tp, 0, true);
12396
12397                         /* All link indications report up, but the hardware
12398                          * isn't really ready for about 20 msec.  Double it
12399                          * to be sure.
12400                          */
12401                         mdelay(40);
12402
12403                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12404                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
12405                         if (tg3_flag(tp, TSO_CAPABLE) &&
12406                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12407                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12408                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12409                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12410                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12411                 }
12412
12413                 /* Re-enable gphy autopowerdown. */
12414                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12415                         tg3_phy_toggle_apd(tp, true);
12416         }
12417
12418         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12419
12420 done:
12421         tp->phy_flags |= eee_cap;
12422
12423         return err;
12424 }
12425
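      /* ethtool -t handler.  Result slots: data[0] NVRAM, data[1] link,
       * data[2] registers, data[3] memory, data[4..6] MAC/PHY/external
       * loopback and data[7] the interrupt test.
       */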
12426 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12427                           u64 *data)
12428 {
12429         struct tg3 *tp = netdev_priv(dev);
12430         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12431
12432         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12433             tg3_power_up(tp)) {
12434                 etest->flags |= ETH_TEST_FL_FAILED;
12435                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12436                 return;
12437         }
12438
12439         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12440
12441         if (tg3_test_nvram(tp) != 0) {
12442                 etest->flags |= ETH_TEST_FL_FAILED;
12443                 data[0] = 1;
12444         }
12445         if (!doextlpbk && tg3_test_link(tp)) {
12446                 etest->flags |= ETH_TEST_FL_FAILED;
12447                 data[1] = 1;
12448         }
12449         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12450                 int err, err2 = 0, irq_sync = 0;
12451
12452                 if (netif_running(dev)) {
12453                         tg3_phy_stop(tp);
12454                         tg3_netif_stop(tp);
12455                         irq_sync = 1;
12456                 }
12457
12458                 tg3_full_lock(tp, irq_sync);
12459
12460                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12461                 err = tg3_nvram_lock(tp);
12462                 tg3_halt_cpu(tp, RX_CPU_BASE);
12463                 if (!tg3_flag(tp, 5705_PLUS))
12464                         tg3_halt_cpu(tp, TX_CPU_BASE);
12465                 if (!err)
12466                         tg3_nvram_unlock(tp);
12467
12468                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12469                         tg3_phy_reset(tp);
12470
12471                 if (tg3_test_registers(tp) != 0) {
12472                         etest->flags |= ETH_TEST_FL_FAILED;
12473                         data[2] = 1;
12474                 }
12475
12476                 if (tg3_test_memory(tp) != 0) {
12477                         etest->flags |= ETH_TEST_FL_FAILED;
12478                         data[3] = 1;
12479                 }
12480
12481                 if (doextlpbk)
12482                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12483
12484                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12485                         etest->flags |= ETH_TEST_FL_FAILED;
12486
12487                 tg3_full_unlock(tp);
12488
12489                 if (tg3_test_interrupt(tp) != 0) {
12490                         etest->flags |= ETH_TEST_FL_FAILED;
12491                         data[7] = 1;
12492                 }
12493
12494                 tg3_full_lock(tp, 0);
12495
12496                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12497                 if (netif_running(dev)) {
12498                         tg3_flag_set(tp, INIT_COMPLETE);
12499                         err2 = tg3_restart_hw(tp, 1);
12500                         if (!err2)
12501                                 tg3_netif_start(tp);
12502                 }
12503
12504                 tg3_full_unlock(tp);
12505
12506                 if (irq_sync && !err2)
12507                         tg3_phy_start(tp);
12508         }
12509         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12510                 tg3_power_down(tp);
12511
12512 }
12513
12514 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12515 {
12516         struct mii_ioctl_data *data = if_mii(ifr);
12517         struct tg3 *tp = netdev_priv(dev);
12518         int err;
12519
12520         if (tg3_flag(tp, USE_PHYLIB)) {
12521                 struct phy_device *phydev;
12522                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12523                         return -EAGAIN;
12524                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12525                 return phy_mii_ioctl(phydev, ifr, cmd);
12526         }
12527
12528         switch (cmd) {
12529         case SIOCGMIIPHY:
12530                 data->phy_id = tp->phy_addr;
12531
12532                 /* fallthru */
12533         case SIOCGMIIREG: {
12534                 u32 mii_regval;
12535
12536                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12537                         break;                  /* We have no PHY */
12538
12539                 if (!netif_running(dev))
12540                         return -EAGAIN;
12541
12542                 spin_lock_bh(&tp->lock);
12543                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12544                 spin_unlock_bh(&tp->lock);
12545
12546                 data->val_out = mii_regval;
12547
12548                 return err;
12549         }
12550
12551         case SIOCSMIIREG:
12552                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12553                         break;                  /* We have no PHY */
12554
12555                 if (!netif_running(dev))
12556                         return -EAGAIN;
12557
12558                 spin_lock_bh(&tp->lock);
12559                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12560                 spin_unlock_bh(&tp->lock);
12561
12562                 return err;
12563
12564         default:
12565                 /* do nothing */
12566                 break;
12567         }
12568         return -EOPNOTSUPP;
12569 }
12570
12571 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12572 {
12573         struct tg3 *tp = netdev_priv(dev);
12574
12575         memcpy(ec, &tp->coal, sizeof(*ec));
12576         return 0;
12577 }
12578
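      /* ethtool -C handler.  For example, "ethtool -C ethX rx-usecs 20
       * rx-frames 5" arrives here as ec->rx_coalesce_usecs = 20 and
       * ec->rx_max_coalesced_frames = 5; every parameter is range-checked
       * against the chip limits before being copied into tp->coal.
       */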
12579 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12580 {
12581         struct tg3 *tp = netdev_priv(dev);
12582         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12583         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12584
12585         if (!tg3_flag(tp, 5705_PLUS)) {
12586                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12587                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12588                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12589                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12590         }
12591
12592         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12593             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12594             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12595             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12596             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12597             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12598             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12599             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12600             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12601             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12602                 return -EINVAL;
12603
12604         /* No rx interrupts will be generated if both are zero */
12605         if ((ec->rx_coalesce_usecs == 0) &&
12606             (ec->rx_max_coalesced_frames == 0))
12607                 return -EINVAL;
12608
12609         /* No tx interrupts will be generated if both are zero */
12610         if ((ec->tx_coalesce_usecs == 0) &&
12611             (ec->tx_max_coalesced_frames == 0))
12612                 return -EINVAL;
12613
12614         /* Only copy relevant parameters, ignore all others. */
12615         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12616         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12617         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12618         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12619         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12620         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12621         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12622         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12623         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12624
12625         if (netif_running(dev)) {
12626                 tg3_full_lock(tp, 0);
12627                 __tg3_set_coalesce(tp, &tp->coal);
12628                 tg3_full_unlock(tp);
12629         }
12630         return 0;
12631 }
12632
12633 static const struct ethtool_ops tg3_ethtool_ops = {
12634         .get_settings           = tg3_get_settings,
12635         .set_settings           = tg3_set_settings,
12636         .get_drvinfo            = tg3_get_drvinfo,
12637         .get_regs_len           = tg3_get_regs_len,
12638         .get_regs               = tg3_get_regs,
12639         .get_wol                = tg3_get_wol,
12640         .set_wol                = tg3_set_wol,
12641         .get_msglevel           = tg3_get_msglevel,
12642         .set_msglevel           = tg3_set_msglevel,
12643         .nway_reset             = tg3_nway_reset,
12644         .get_link               = ethtool_op_get_link,
12645         .get_eeprom_len         = tg3_get_eeprom_len,
12646         .get_eeprom             = tg3_get_eeprom,
12647         .set_eeprom             = tg3_set_eeprom,
12648         .get_ringparam          = tg3_get_ringparam,
12649         .set_ringparam          = tg3_set_ringparam,
12650         .get_pauseparam         = tg3_get_pauseparam,
12651         .set_pauseparam         = tg3_set_pauseparam,
12652         .self_test              = tg3_self_test,
12653         .get_strings            = tg3_get_strings,
12654         .set_phys_id            = tg3_set_phys_id,
12655         .get_ethtool_stats      = tg3_get_ethtool_stats,
12656         .get_coalesce           = tg3_get_coalesce,
12657         .set_coalesce           = tg3_set_coalesce,
12658         .get_sset_count         = tg3_get_sset_count,
12659         .get_rxnfc              = tg3_get_rxnfc,
12660         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12661         .get_rxfh_indir         = tg3_get_rxfh_indir,
12662         .set_rxfh_indir         = tg3_set_rxfh_indir,
12663         .get_channels           = tg3_get_channels,
12664         .set_channels           = tg3_set_channels,
12665         .get_ts_info            = ethtool_op_get_ts_info,
12666 };
12667
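/* If the device is down, tp->hw_stats has been freed; fall back to the
 * counters snapshotted into tp->net_stats_prev when the device was last
 * brought down, so the totals stay monotonic across ifdown/ifup.
 */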
12668 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12669                                                 struct rtnl_link_stats64 *stats)
12670 {
12671         struct tg3 *tp = netdev_priv(dev);
12672
12673         spin_lock_bh(&tp->lock);
12674         if (!tp->hw_stats) {
12675                 spin_unlock_bh(&tp->lock);
12676                 return &tp->net_stats_prev;
12677         }
12678
12679         tg3_get_nstats(tp, stats);
12680         spin_unlock_bh(&tp->lock);
12681
12682         return stats;
12683 }
12684
12685 static void tg3_set_rx_mode(struct net_device *dev)
12686 {
12687         struct tg3 *tp = netdev_priv(dev);
12688
12689         if (!netif_running(dev))
12690                 return;
12691
12692         tg3_full_lock(tp, 0);
12693         __tg3_set_rx_mode(dev);
12694         tg3_full_unlock(tp);
12695 }
12696
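/* On 5780-class chips jumbo MTUs and TSO are mutually exclusive: going
 * above ETH_DATA_LEN drops TSO_CAPABLE (with the netdev features
 * re-evaluated around the flip), while all other chips simply toggle
 * the dedicated jumbo RX ring.
 */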
12697 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12698                                int new_mtu)
12699 {
12700         dev->mtu = new_mtu;
12701
12702         if (new_mtu > ETH_DATA_LEN) {
12703                 if (tg3_flag(tp, 5780_CLASS)) {
12704                         netdev_update_features(dev);
12705                         tg3_flag_clear(tp, TSO_CAPABLE);
12706                 } else {
12707                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12708                 }
12709         } else {
12710                 if (tg3_flag(tp, 5780_CLASS)) {
12711                         tg3_flag_set(tp, TSO_CAPABLE);
12712                         netdev_update_features(dev);
12713                 }
12714                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12715         }
12716 }
12717
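/* Changing the MTU on a running interface requires a full restart:
 * stop the PHY and the queues, halt the chip under the full lock,
 * apply the new MTU, then bring the hardware back up.
 */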
12718 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12719 {
12720         struct tg3 *tp = netdev_priv(dev);
12721         int err, reset_phy = 0;
12722
12723         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12724                 return -EINVAL;
12725
12726         if (!netif_running(dev)) {
12727                 /* We'll just catch it later when the
12728                  * device is brought up.
12729                  */
12730                 tg3_set_mtu(dev, tp, new_mtu);
12731                 return 0;
12732         }
12733
12734         tg3_phy_stop(tp);
12735
12736         tg3_netif_stop(tp);
12737
12738         tg3_full_lock(tp, 1);
12739
12740         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12741
12742         tg3_set_mtu(dev, tp, new_mtu);
12743
12744         /* Reset the PHY, otherwise the read DMA engine will be left in a
12745          * mode that breaks all DMA requests down to 256 bytes.
12746          */
12747         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12748                 reset_phy = 1;
12749
12750         err = tg3_restart_hw(tp, reset_phy);
12751
12752         if (!err)
12753                 tg3_netif_start(tp);
12754
12755         tg3_full_unlock(tp);
12756
12757         if (!err)
12758                 tg3_phy_start(tp);
12759
12760         return err;
12761 }
12762
12763 static const struct net_device_ops tg3_netdev_ops = {
12764         .ndo_open               = tg3_open,
12765         .ndo_stop               = tg3_close,
12766         .ndo_start_xmit         = tg3_start_xmit,
12767         .ndo_get_stats64        = tg3_get_stats64,
12768         .ndo_validate_addr      = eth_validate_addr,
12769         .ndo_set_rx_mode        = tg3_set_rx_mode,
12770         .ndo_set_mac_address    = tg3_set_mac_addr,
12771         .ndo_do_ioctl           = tg3_ioctl,
12772         .ndo_tx_timeout         = tg3_tx_timeout,
12773         .ndo_change_mtu         = tg3_change_mtu,
12774         .ndo_fix_features       = tg3_fix_features,
12775         .ndo_set_features       = tg3_set_features,
12776 #ifdef CONFIG_NET_POLL_CONTROLLER
12777         .ndo_poll_controller    = tg3_poll_controller,
12778 #endif
12779 };
12780
12781 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12782 {
12783         u32 cursize, val, magic;
12784
12785         tp->nvram_size = EEPROM_CHIP_SIZE;
12786
12787         if (tg3_nvram_read(tp, 0, &magic) != 0)
12788                 return;
12789
12790         if ((magic != TG3_EEPROM_MAGIC) &&
12791             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12792             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12793                 return;
12794
12795         /*
12796          * Size the chip by reading offsets at increasing powers of two.
12797          * When we encounter our validation signature, we know the addressing
12798          * has wrapped around, and thus have our chip size.
12799          */
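        /* E.g. on a 1 KB part the read at offset 0x400 aliases offset 0
         * and returns the magic value, so the loop exits with cursize ==
         * 0x400 (assuming clean power-of-two address aliasing).
         */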
12800         cursize = 0x10;
12801
12802         while (cursize < tp->nvram_size) {
12803                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12804                         return;
12805
12806                 if (val == magic)
12807                         break;
12808
12809                 cursize <<= 1;
12810         }
12811
12812         tp->nvram_size = cursize;
12813 }
12814
12815 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12816 {
12817         u32 val;
12818
12819         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12820                 return;
12821
12822         /* Selfboot format */
12823         if (val != TG3_EEPROM_MAGIC) {
12824                 tg3_get_eeprom_size(tp);
12825                 return;
12826         }
12827
12828         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12829                 if (val != 0) {
12830                         /* We want to operate on the 16-bit value at
12831                          * offset 0xf2.  The tg3_nvram_read() call reads
12832                          * from NVRAM and byteswaps the data according
12833                          * to the byteswapping settings used for all
12834                          * other register accesses, so the value we want
12835                          * always lands in the lower 16 bits.  However,
12836                          * NVRAM stores data in LE format, so the read
12837                          * result is always opposite the endianness of
12838                          * the CPU, and the 16-bit byteswap below brings
12839                          * it back to CPU endianness.
12840                          */
12841                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12842                         return;
12843                 }
12844         }
12845         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12846 }
12847
12848 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12849 {
12850         u32 nvcfg1;
12851
12852         nvcfg1 = tr32(NVRAM_CFG1);
12853         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12854                 tg3_flag_set(tp, FLASH);
12855         } else {
12856                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12857                 tw32(NVRAM_CFG1, nvcfg1);
12858         }
12859
12860         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12861             tg3_flag(tp, 5780_CLASS)) {
12862                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12863                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12864                         tp->nvram_jedecnum = JEDEC_ATMEL;
12865                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12866                         tg3_flag_set(tp, NVRAM_BUFFERED);
12867                         break;
12868                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12869                         tp->nvram_jedecnum = JEDEC_ATMEL;
12870                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12871                         break;
12872                 case FLASH_VENDOR_ATMEL_EEPROM:
12873                         tp->nvram_jedecnum = JEDEC_ATMEL;
12874                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12875                         tg3_flag_set(tp, NVRAM_BUFFERED);
12876                         break;
12877                 case FLASH_VENDOR_ST:
12878                         tp->nvram_jedecnum = JEDEC_ST;
12879                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12880                         tg3_flag_set(tp, NVRAM_BUFFERED);
12881                         break;
12882                 case FLASH_VENDOR_SAIFUN:
12883                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12884                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12885                         break;
12886                 case FLASH_VENDOR_SST_SMALL:
12887                 case FLASH_VENDOR_SST_LARGE:
12888                         tp->nvram_jedecnum = JEDEC_SST;
12889                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12890                         break;
12891                 }
12892         } else {
12893                 tp->nvram_jedecnum = JEDEC_ATMEL;
12894                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12895                 tg3_flag_set(tp, NVRAM_BUFFERED);
12896         }
12897 }
12898
12899 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12900 {
12901         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12902         case FLASH_5752PAGE_SIZE_256:
12903                 tp->nvram_pagesize = 256;
12904                 break;
12905         case FLASH_5752PAGE_SIZE_512:
12906                 tp->nvram_pagesize = 512;
12907                 break;
12908         case FLASH_5752PAGE_SIZE_1K:
12909                 tp->nvram_pagesize = 1024;
12910                 break;
12911         case FLASH_5752PAGE_SIZE_2K:
12912                 tp->nvram_pagesize = 2048;
12913                 break;
12914         case FLASH_5752PAGE_SIZE_4K:
12915                 tp->nvram_pagesize = 4096;
12916                 break;
12917         case FLASH_5752PAGE_SIZE_264:
12918                 tp->nvram_pagesize = 264;
12919                 break;
12920         case FLASH_5752PAGE_SIZE_528:
12921                 tp->nvram_pagesize = 528;
12922                 break;
12923         }
12924 }
12925
12926 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12927 {
12928         u32 nvcfg1;
12929
12930         nvcfg1 = tr32(NVRAM_CFG1);
12931
12932         /* NVRAM protection for TPM */
12933         if (nvcfg1 & (1 << 27))
12934                 tg3_flag_set(tp, PROTECTED_NVRAM);
12935
12936         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12937         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12938         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12939                 tp->nvram_jedecnum = JEDEC_ATMEL;
12940                 tg3_flag_set(tp, NVRAM_BUFFERED);
12941                 break;
12942         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12943                 tp->nvram_jedecnum = JEDEC_ATMEL;
12944                 tg3_flag_set(tp, NVRAM_BUFFERED);
12945                 tg3_flag_set(tp, FLASH);
12946                 break;
12947         case FLASH_5752VENDOR_ST_M45PE10:
12948         case FLASH_5752VENDOR_ST_M45PE20:
12949         case FLASH_5752VENDOR_ST_M45PE40:
12950                 tp->nvram_jedecnum = JEDEC_ST;
12951                 tg3_flag_set(tp, NVRAM_BUFFERED);
12952                 tg3_flag_set(tp, FLASH);
12953                 break;
12954         }
12955
12956         if (tg3_flag(tp, FLASH)) {
12957                 tg3_nvram_get_pagesize(tp, nvcfg1);
12958         } else {
12959                 /* For eeprom, set pagesize to maximum eeprom size */
12960                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12961
12962                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12963                 tw32(NVRAM_CFG1, nvcfg1);
12964         }
12965 }
12966
12967 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12968 {
12969         u32 nvcfg1, protect = 0;
12970
12971         nvcfg1 = tr32(NVRAM_CFG1);
12972
12973         /* NVRAM protection for TPM */
12974         if (nvcfg1 & (1 << 27)) {
12975                 tg3_flag_set(tp, PROTECTED_NVRAM);
12976                 protect = 1;
12977         }
12978
12979         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12980         switch (nvcfg1) {
12981         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12982         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12983         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12984         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12985                 tp->nvram_jedecnum = JEDEC_ATMEL;
12986                 tg3_flag_set(tp, NVRAM_BUFFERED);
12987                 tg3_flag_set(tp, FLASH);
12988                 tp->nvram_pagesize = 264;
12989                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12990                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12991                         tp->nvram_size = (protect ? 0x3e200 :
12992                                           TG3_NVRAM_SIZE_512KB);
12993                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12994                         tp->nvram_size = (protect ? 0x1f200 :
12995                                           TG3_NVRAM_SIZE_256KB);
12996                 else
12997                         tp->nvram_size = (protect ? 0x1f200 :
12998                                           TG3_NVRAM_SIZE_128KB);
12999                 break;
13000         case FLASH_5752VENDOR_ST_M45PE10:
13001         case FLASH_5752VENDOR_ST_M45PE20:
13002         case FLASH_5752VENDOR_ST_M45PE40:
13003                 tp->nvram_jedecnum = JEDEC_ST;
13004                 tg3_flag_set(tp, NVRAM_BUFFERED);
13005                 tg3_flag_set(tp, FLASH);
13006                 tp->nvram_pagesize = 256;
13007                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13008                         tp->nvram_size = (protect ?
13009                                           TG3_NVRAM_SIZE_64KB :
13010                                           TG3_NVRAM_SIZE_128KB);
13011                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13012                         tp->nvram_size = (protect ?
13013                                           TG3_NVRAM_SIZE_64KB :
13014                                           TG3_NVRAM_SIZE_256KB);
13015                 else
13016                         tp->nvram_size = (protect ?
13017                                           TG3_NVRAM_SIZE_128KB :
13018                                           TG3_NVRAM_SIZE_512KB);
13019                 break;
13020         }
13021 }
13022
13023 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
13024 {
13025         u32 nvcfg1;
13026
13027         nvcfg1 = tr32(NVRAM_CFG1);
13028
13029         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13030         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13031         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13032         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13033         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13034                 tp->nvram_jedecnum = JEDEC_ATMEL;
13035                 tg3_flag_set(tp, NVRAM_BUFFERED);
13036                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13037
13038                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13039                 tw32(NVRAM_CFG1, nvcfg1);
13040                 break;
13041         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13042         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13043         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13044         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13045                 tp->nvram_jedecnum = JEDEC_ATMEL;
13046                 tg3_flag_set(tp, NVRAM_BUFFERED);
13047                 tg3_flag_set(tp, FLASH);
13048                 tp->nvram_pagesize = 264;
13049                 break;
13050         case FLASH_5752VENDOR_ST_M45PE10:
13051         case FLASH_5752VENDOR_ST_M45PE20:
13052         case FLASH_5752VENDOR_ST_M45PE40:
13053                 tp->nvram_jedecnum = JEDEC_ST;
13054                 tg3_flag_set(tp, NVRAM_BUFFERED);
13055                 tg3_flag_set(tp, FLASH);
13056                 tp->nvram_pagesize = 256;
13057                 break;
13058         }
13059 }
13060
13061 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
13062 {
13063         u32 nvcfg1, protect = 0;
13064
13065         nvcfg1 = tr32(NVRAM_CFG1);
13066
13067         /* NVRAM protection for TPM */
13068         if (nvcfg1 & (1 << 27)) {
13069                 tg3_flag_set(tp, PROTECTED_NVRAM);
13070                 protect = 1;
13071         }
13072
13073         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13074         switch (nvcfg1) {
13075         case FLASH_5761VENDOR_ATMEL_ADB021D:
13076         case FLASH_5761VENDOR_ATMEL_ADB041D:
13077         case FLASH_5761VENDOR_ATMEL_ADB081D:
13078         case FLASH_5761VENDOR_ATMEL_ADB161D:
13079         case FLASH_5761VENDOR_ATMEL_MDB021D:
13080         case FLASH_5761VENDOR_ATMEL_MDB041D:
13081         case FLASH_5761VENDOR_ATMEL_MDB081D:
13082         case FLASH_5761VENDOR_ATMEL_MDB161D:
13083                 tp->nvram_jedecnum = JEDEC_ATMEL;
13084                 tg3_flag_set(tp, NVRAM_BUFFERED);
13085                 tg3_flag_set(tp, FLASH);
13086                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13087                 tp->nvram_pagesize = 256;
13088                 break;
13089         case FLASH_5761VENDOR_ST_A_M45PE20:
13090         case FLASH_5761VENDOR_ST_A_M45PE40:
13091         case FLASH_5761VENDOR_ST_A_M45PE80:
13092         case FLASH_5761VENDOR_ST_A_M45PE16:
13093         case FLASH_5761VENDOR_ST_M_M45PE20:
13094         case FLASH_5761VENDOR_ST_M_M45PE40:
13095         case FLASH_5761VENDOR_ST_M_M45PE80:
13096         case FLASH_5761VENDOR_ST_M_M45PE16:
13097                 tp->nvram_jedecnum = JEDEC_ST;
13098                 tg3_flag_set(tp, NVRAM_BUFFERED);
13099                 tg3_flag_set(tp, FLASH);
13100                 tp->nvram_pagesize = 256;
13101                 break;
13102         }
13103
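        /* With TPM protection active, the usable size is whatever the
         * address lockout register allows rather than the part's nominal
         * capacity.
         */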
13104         if (protect) {
13105                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13106         } else {
13107                 switch (nvcfg1) {
13108                 case FLASH_5761VENDOR_ATMEL_ADB161D:
13109                 case FLASH_5761VENDOR_ATMEL_MDB161D:
13110                 case FLASH_5761VENDOR_ST_A_M45PE16:
13111                 case FLASH_5761VENDOR_ST_M_M45PE16:
13112                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13113                         break;
13114                 case FLASH_5761VENDOR_ATMEL_ADB081D:
13115                 case FLASH_5761VENDOR_ATMEL_MDB081D:
13116                 case FLASH_5761VENDOR_ST_A_M45PE80:
13117                 case FLASH_5761VENDOR_ST_M_M45PE80:
13118                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13119                         break;
13120                 case FLASH_5761VENDOR_ATMEL_ADB041D:
13121                 case FLASH_5761VENDOR_ATMEL_MDB041D:
13122                 case FLASH_5761VENDOR_ST_A_M45PE40:
13123                 case FLASH_5761VENDOR_ST_M_M45PE40:
13124                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13125                         break;
13126                 case FLASH_5761VENDOR_ATMEL_ADB021D:
13127                 case FLASH_5761VENDOR_ATMEL_MDB021D:
13128                 case FLASH_5761VENDOR_ST_A_M45PE20:
13129                 case FLASH_5761VENDOR_ST_M_M45PE20:
13130                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13131                         break;
13132                 }
13133         }
13134 }
13135
13136 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
13137 {
13138         tp->nvram_jedecnum = JEDEC_ATMEL;
13139         tg3_flag_set(tp, NVRAM_BUFFERED);
13140         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13141 }
13142
13143 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
13144 {
13145         u32 nvcfg1;
13146
13147         nvcfg1 = tr32(NVRAM_CFG1);
13148
13149         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13150         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13151         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13152                 tp->nvram_jedecnum = JEDEC_ATMEL;
13153                 tg3_flag_set(tp, NVRAM_BUFFERED);
13154                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13155
13156                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13157                 tw32(NVRAM_CFG1, nvcfg1);
13158                 return;
13159         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13160         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13161         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13162         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13163         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13164         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13165         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13166                 tp->nvram_jedecnum = JEDEC_ATMEL;
13167                 tg3_flag_set(tp, NVRAM_BUFFERED);
13168                 tg3_flag_set(tp, FLASH);
13169
13170                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13171                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13172                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13173                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13174                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13175                         break;
13176                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13177                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13178                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13179                         break;
13180                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13181                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13182                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13183                         break;
13184                 }
13185                 break;
13186         case FLASH_5752VENDOR_ST_M45PE10:
13187         case FLASH_5752VENDOR_ST_M45PE20:
13188         case FLASH_5752VENDOR_ST_M45PE40:
13189                 tp->nvram_jedecnum = JEDEC_ST;
13190                 tg3_flag_set(tp, NVRAM_BUFFERED);
13191                 tg3_flag_set(tp, FLASH);
13192
13193                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13194                 case FLASH_5752VENDOR_ST_M45PE10:
13195                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13196                         break;
13197                 case FLASH_5752VENDOR_ST_M45PE20:
13198                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13199                         break;
13200                 case FLASH_5752VENDOR_ST_M45PE40:
13201                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13202                         break;
13203                 }
13204                 break;
13205         default:
13206                 tg3_flag_set(tp, NO_NVRAM);
13207                 return;
13208         }
13209
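        /* 264- and 528-byte pages indicate Atmel AT45-style DataFlash,
         * which needs page-based address translation; everything else is
         * linearly addressed, hence NO_NVRAM_ADDR_TRANS.
         */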
13210         tg3_nvram_get_pagesize(tp, nvcfg1);
13211         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13212                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13213 }
13214
13215
13216 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
13217 {
13218         u32 nvcfg1;
13219
13220         nvcfg1 = tr32(NVRAM_CFG1);
13221
13222         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13223         case FLASH_5717VENDOR_ATMEL_EEPROM:
13224         case FLASH_5717VENDOR_MICRO_EEPROM:
13225                 tp->nvram_jedecnum = JEDEC_ATMEL;
13226                 tg3_flag_set(tp, NVRAM_BUFFERED);
13227                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13228
13229                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13230                 tw32(NVRAM_CFG1, nvcfg1);
13231                 return;
13232         case FLASH_5717VENDOR_ATMEL_MDB011D:
13233         case FLASH_5717VENDOR_ATMEL_ADB011B:
13234         case FLASH_5717VENDOR_ATMEL_ADB011D:
13235         case FLASH_5717VENDOR_ATMEL_MDB021D:
13236         case FLASH_5717VENDOR_ATMEL_ADB021B:
13237         case FLASH_5717VENDOR_ATMEL_ADB021D:
13238         case FLASH_5717VENDOR_ATMEL_45USPT:
13239                 tp->nvram_jedecnum = JEDEC_ATMEL;
13240                 tg3_flag_set(tp, NVRAM_BUFFERED);
13241                 tg3_flag_set(tp, FLASH);
13242
13243                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13244                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13245                         /* Detect size with tg3_get_nvram_size() */
13246                         break;
13247                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13248                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13249                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13250                         break;
13251                 default:
13252                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13253                         break;
13254                 }
13255                 break;
13256         case FLASH_5717VENDOR_ST_M_M25PE10:
13257         case FLASH_5717VENDOR_ST_A_M25PE10:
13258         case FLASH_5717VENDOR_ST_M_M45PE10:
13259         case FLASH_5717VENDOR_ST_A_M45PE10:
13260         case FLASH_5717VENDOR_ST_M_M25PE20:
13261         case FLASH_5717VENDOR_ST_A_M25PE20:
13262         case FLASH_5717VENDOR_ST_M_M45PE20:
13263         case FLASH_5717VENDOR_ST_A_M45PE20:
13264         case FLASH_5717VENDOR_ST_25USPT:
13265         case FLASH_5717VENDOR_ST_45USPT:
13266                 tp->nvram_jedecnum = JEDEC_ST;
13267                 tg3_flag_set(tp, NVRAM_BUFFERED);
13268                 tg3_flag_set(tp, FLASH);
13269
13270                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13271                 case FLASH_5717VENDOR_ST_M_M25PE20:
13272                 case FLASH_5717VENDOR_ST_M_M45PE20:
13273                         /* Detect size with tg3_get_nvram_size() */
13274                         break;
13275                 case FLASH_5717VENDOR_ST_A_M25PE20:
13276                 case FLASH_5717VENDOR_ST_A_M45PE20:
13277                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13278                         break;
13279                 default:
13280                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13281                         break;
13282                 }
13283                 break;
13284         default:
13285                 tg3_flag_set(tp, NO_NVRAM);
13286                 return;
13287         }
13288
13289         tg3_nvram_get_pagesize(tp, nvcfg1);
13290         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13291                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13292 }
13293
13294 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
13295 {
13296         u32 nvcfg1, nvmpinstrp;
13297
13298         nvcfg1 = tr32(NVRAM_CFG1);
13299         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13300
13301         switch (nvmpinstrp) {
13302         case FLASH_5720_EEPROM_HD:
13303         case FLASH_5720_EEPROM_LD:
13304                 tp->nvram_jedecnum = JEDEC_ATMEL;
13305                 tg3_flag_set(tp, NVRAM_BUFFERED);
13306
13307                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13308                 tw32(NVRAM_CFG1, nvcfg1);
13309                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13310                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13311                 else
13312                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13313                 return;
13314         case FLASH_5720VENDOR_M_ATMEL_DB011D:
13315         case FLASH_5720VENDOR_A_ATMEL_DB011B:
13316         case FLASH_5720VENDOR_A_ATMEL_DB011D:
13317         case FLASH_5720VENDOR_M_ATMEL_DB021D:
13318         case FLASH_5720VENDOR_A_ATMEL_DB021B:
13319         case FLASH_5720VENDOR_A_ATMEL_DB021D:
13320         case FLASH_5720VENDOR_M_ATMEL_DB041D:
13321         case FLASH_5720VENDOR_A_ATMEL_DB041B:
13322         case FLASH_5720VENDOR_A_ATMEL_DB041D:
13323         case FLASH_5720VENDOR_M_ATMEL_DB081D:
13324         case FLASH_5720VENDOR_A_ATMEL_DB081D:
13325         case FLASH_5720VENDOR_ATMEL_45USPT:
13326                 tp->nvram_jedecnum = JEDEC_ATMEL;
13327                 tg3_flag_set(tp, NVRAM_BUFFERED);
13328                 tg3_flag_set(tp, FLASH);
13329
13330                 switch (nvmpinstrp) {
13331                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13332                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13333                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13334                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13335                         break;
13336                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13337                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13338                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13339                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13340                         break;
13341                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13342                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13343                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13344                         break;
13345                 default:
13346                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13347                         break;
13348                 }
13349                 break;
13350         case FLASH_5720VENDOR_M_ST_M25PE10:
13351         case FLASH_5720VENDOR_M_ST_M45PE10:
13352         case FLASH_5720VENDOR_A_ST_M25PE10:
13353         case FLASH_5720VENDOR_A_ST_M45PE10:
13354         case FLASH_5720VENDOR_M_ST_M25PE20:
13355         case FLASH_5720VENDOR_M_ST_M45PE20:
13356         case FLASH_5720VENDOR_A_ST_M25PE20:
13357         case FLASH_5720VENDOR_A_ST_M45PE20:
13358         case FLASH_5720VENDOR_M_ST_M25PE40:
13359         case FLASH_5720VENDOR_M_ST_M45PE40:
13360         case FLASH_5720VENDOR_A_ST_M25PE40:
13361         case FLASH_5720VENDOR_A_ST_M45PE40:
13362         case FLASH_5720VENDOR_M_ST_M25PE80:
13363         case FLASH_5720VENDOR_M_ST_M45PE80:
13364         case FLASH_5720VENDOR_A_ST_M25PE80:
13365         case FLASH_5720VENDOR_A_ST_M45PE80:
13366         case FLASH_5720VENDOR_ST_25USPT:
13367         case FLASH_5720VENDOR_ST_45USPT:
13368                 tp->nvram_jedecnum = JEDEC_ST;
13369                 tg3_flag_set(tp, NVRAM_BUFFERED);
13370                 tg3_flag_set(tp, FLASH);
13371
13372                 switch (nvmpinstrp) {
13373                 case FLASH_5720VENDOR_M_ST_M25PE20:
13374                 case FLASH_5720VENDOR_M_ST_M45PE20:
13375                 case FLASH_5720VENDOR_A_ST_M25PE20:
13376                 case FLASH_5720VENDOR_A_ST_M45PE20:
13377                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13378                         break;
13379                 case FLASH_5720VENDOR_M_ST_M25PE40:
13380                 case FLASH_5720VENDOR_M_ST_M45PE40:
13381                 case FLASH_5720VENDOR_A_ST_M25PE40:
13382                 case FLASH_5720VENDOR_A_ST_M45PE40:
13383                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13384                         break;
13385                 case FLASH_5720VENDOR_M_ST_M25PE80:
13386                 case FLASH_5720VENDOR_M_ST_M45PE80:
13387                 case FLASH_5720VENDOR_A_ST_M25PE80:
13388                 case FLASH_5720VENDOR_A_ST_M45PE80:
13389                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13390                         break;
13391                 default:
13392                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13393                         break;
13394                 }
13395                 break;
13396         default:
13397                 tg3_flag_set(tp, NO_NVRAM);
13398                 return;
13399         }
13400
13401         tg3_nvram_get_pagesize(tp, nvcfg1);
13402         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13403                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13404 }
13405
13406 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13407 static void __devinit tg3_nvram_init(struct tg3 *tp)
13408 {
13409         tw32_f(GRC_EEPROM_ADDR,
13410              (EEPROM_ADDR_FSM_RESET |
13411               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13412                EEPROM_ADDR_CLKPERD_SHIFT)));
13413
13414         msleep(1);
13415
13416         /* Enable seeprom accesses. */
13417         tw32_f(GRC_LOCAL_CTRL,
13418              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13419         udelay(100);
13420
13421         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13422             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13423                 tg3_flag_set(tp, NVRAM);
13424
13425                 if (tg3_nvram_lock(tp)) {
13426                         netdev_warn(tp->dev,
13427                                     "Cannot get nvram lock, %s failed\n",
13428                                     __func__);
13429                         return;
13430                 }
13431                 tg3_enable_nvram_access(tp);
13432
13433                 tp->nvram_size = 0;
13434
13435                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13436                         tg3_get_5752_nvram_info(tp);
13437                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13438                         tg3_get_5755_nvram_info(tp);
13439                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13440                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13441                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13442                         tg3_get_5787_nvram_info(tp);
13443                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13444                         tg3_get_5761_nvram_info(tp);
13445                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13446                         tg3_get_5906_nvram_info(tp);
13447                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13448                          tg3_flag(tp, 57765_CLASS))
13449                         tg3_get_57780_nvram_info(tp);
13450                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13451                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13452                         tg3_get_5717_nvram_info(tp);
13453                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13454                         tg3_get_5720_nvram_info(tp);
13455                 else
13456                         tg3_get_nvram_info(tp);
13457
13458                 if (tp->nvram_size == 0)
13459                         tg3_get_nvram_size(tp);
13460
13461                 tg3_disable_nvram_access(tp);
13462                 tg3_nvram_unlock(tp);
13463
13464         } else {
13465                 tg3_flag_clear(tp, NVRAM);
13466                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13467
13468                 tg3_get_eeprom_size(tp);
13469         }
13470 }
13471
13472 struct subsys_tbl_ent {
13473         u16 subsys_vendor, subsys_devid;
13474         u32 phy_id;
13475 };
13476
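/* Boards whose NVRAM carries no usable PHY ID are matched by PCI
 * subsystem vendor/device ID instead.  A phy_id of 0 below marks a
 * SerDes board (see tg3_phy_probe()).
 */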
13477 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13478         /* Broadcom boards. */
13479         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13480           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13481         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13482           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13483         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13484           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13485         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13486           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13487         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13488           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13489         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13490           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13491         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13492           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13493         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13494           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13495         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13496           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13497         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13498           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13499         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13500           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13501
13502         /* 3com boards. */
13503         { TG3PCI_SUBVENDOR_ID_3COM,
13504           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13505         { TG3PCI_SUBVENDOR_ID_3COM,
13506           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13507         { TG3PCI_SUBVENDOR_ID_3COM,
13508           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13509         { TG3PCI_SUBVENDOR_ID_3COM,
13510           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13511         { TG3PCI_SUBVENDOR_ID_3COM,
13512           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13513
13514         /* DELL boards. */
13515         { TG3PCI_SUBVENDOR_ID_DELL,
13516           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13517         { TG3PCI_SUBVENDOR_ID_DELL,
13518           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13519         { TG3PCI_SUBVENDOR_ID_DELL,
13520           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13521         { TG3PCI_SUBVENDOR_ID_DELL,
13522           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13523
13524         /* Compaq boards. */
13525         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13526           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13527         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13528           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13529         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13530           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13531         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13532           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13533         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13534           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13535
13536         /* IBM boards. */
13537         { TG3PCI_SUBVENDOR_ID_IBM,
13538           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13539 };
13540
13541 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13542 {
13543         int i;
13544
13545         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13546                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13547                      tp->pdev->subsystem_vendor) &&
13548                     (subsys_id_to_phy_id[i].subsys_devid ==
13549                      tp->pdev->subsystem_device))
13550                         return &subsys_id_to_phy_id[i];
13551         }
13552         return NULL;
13553 }
13554
13555 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13556 {
13557         u32 val;
13558
13559         tp->phy_id = TG3_PHY_ID_INVALID;
13560         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13561
13562         /* Assume an onboard device and WOL capable by default.  */
13563         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13564         tg3_flag_set(tp, WOL_CAP);
13565
13566         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13567                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13568                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13569                         tg3_flag_set(tp, IS_NIC);
13570                 }
13571                 val = tr32(VCPU_CFGSHDW);
13572                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13573                         tg3_flag_set(tp, ASPM_WORKAROUND);
13574                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13575                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13576                         tg3_flag_set(tp, WOL_ENABLE);
13577                         device_set_wakeup_enable(&tp->pdev->dev, true);
13578                 }
13579                 goto done;
13580         }
13581
13582         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13583         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13584                 u32 nic_cfg, led_cfg;
13585                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13586                 int eeprom_phy_serdes = 0;
13587
13588                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13589                 tp->nic_sram_data_cfg = nic_cfg;
13590
13591                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13592                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13593                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13594                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13595                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13596                     (ver > 0) && (ver < 0x100))
13597                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13598
13599                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13600                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13601
13602                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13603                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13604                         eeprom_phy_serdes = 1;
13605
13606                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13607                 if (nic_phy_id != 0) {
13608                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13609                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13610
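                        /* Repack the two NIC_SRAM PHY ID words into tg3's
                         * internal phy_id format, the same packing
                         * tg3_phy_probe() applies to the raw MII ID
                         * registers.
                         */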
13611                         eeprom_phy_id  = (id1 >> 16) << 10;
13612                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13613                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13614                 } else
13615                         eeprom_phy_id = 0;
13616
13617                 tp->phy_id = eeprom_phy_id;
13618                 if (eeprom_phy_serdes) {
13619                         if (!tg3_flag(tp, 5705_PLUS))
13620                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13621                         else
13622                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13623                 }
13624
13625                 if (tg3_flag(tp, 5750_PLUS))
13626                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13627                                     SHASTA_EXT_LED_MODE_MASK);
13628                 else
13629                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13630
13631                 switch (led_cfg) {
13632                 default:
13633                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13634                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13635                         break;
13636
13637                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13638                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13639                         break;
13640
13641                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13642                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13643
13644                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
13645                          * as happens with some older 5700/5701 bootcode.
13646                          */
13647                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13648                             ASIC_REV_5700 ||
13649                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13650                             ASIC_REV_5701)
13651                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13652
13653                         break;
13654
13655                 case SHASTA_EXT_LED_SHARED:
13656                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13657                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13658                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13659                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13660                                                  LED_CTRL_MODE_PHY_2);
13661                         break;
13662
13663                 case SHASTA_EXT_LED_MAC:
13664                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13665                         break;
13666
13667                 case SHASTA_EXT_LED_COMBO:
13668                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13669                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13670                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13671                                                  LED_CTRL_MODE_PHY_2);
13672                         break;
13673
13674                 }
13675
13676                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13677                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13678                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13679                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13680
13681                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13682                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13683
13684                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13685                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13686                         if ((tp->pdev->subsystem_vendor ==
13687                              PCI_VENDOR_ID_ARIMA) &&
13688                             (tp->pdev->subsystem_device == 0x205a ||
13689                              tp->pdev->subsystem_device == 0x2063))
13690                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13691                 } else {
13692                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13693                         tg3_flag_set(tp, IS_NIC);
13694                 }
13695
13696                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13697                         tg3_flag_set(tp, ENABLE_ASF);
13698                         if (tg3_flag(tp, 5750_PLUS))
13699                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13700                 }
13701
13702                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13703                     tg3_flag(tp, 5750_PLUS))
13704                         tg3_flag_set(tp, ENABLE_APE);
13705
13706                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13707                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13708                         tg3_flag_clear(tp, WOL_CAP);
13709
13710                 if (tg3_flag(tp, WOL_CAP) &&
13711                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13712                         tg3_flag_set(tp, WOL_ENABLE);
13713                         device_set_wakeup_enable(&tp->pdev->dev, true);
13714                 }
13715
13716                 if (cfg2 & (1 << 17))
13717                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13718
13719                 /* SerDes signal pre-emphasis in register 0x590 is set by
13720                  * the bootcode if bit 18 is set. */
13721                 if (cfg2 & (1 << 18))
13722                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13723
13724                 if ((tg3_flag(tp, 57765_PLUS) ||
13725                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13726                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13727                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13728                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13729
13730                 if (tg3_flag(tp, PCI_EXPRESS) &&
13731                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13732                     !tg3_flag(tp, 57765_PLUS)) {
13733                         u32 cfg3;
13734
13735                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13736                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13737                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13738                 }
13739
13740                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13741                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13742                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13743                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13744                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13745                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13746         }
13747 done:
13748         if (tg3_flag(tp, WOL_CAP))
13749                 device_set_wakeup_enable(&tp->pdev->dev,
13750                                          tg3_flag(tp, WOL_ENABLE));
13751         else
13752                 device_set_wakeup_capable(&tp->pdev->dev, false);
13753 }
13754
13755 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13756 {
13757         int i;
13758         u32 val;
13759
13760         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13761         tw32(OTP_CTRL, cmd);
13762
13763         /* Wait for up to 1 ms for command to execute. */
13764         for (i = 0; i < 100; i++) {
13765                 val = tr32(OTP_STATUS);
13766                 if (val & OTP_STATUS_CMD_DONE)
13767                         break;
13768                 udelay(10);
13769         }
13770
13771         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13772 }
13773
13774 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13775  * configuration is a 32-bit value that straddles the alignment boundary.
13776  * We do two 32-bit reads and then shift and merge the results.
13777  */
13778 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13779 {
13780         u32 bhalf_otp, thalf_otp;
13781
13782         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13783
13784         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13785                 return 0;
13786
13787         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13788
13789         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13790                 return 0;
13791
13792         thalf_otp = tr32(OTP_READ_DATA);
13793
13794         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13795
13796         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13797                 return 0;
13798
13799         bhalf_otp = tr32(OTP_READ_DATA);
13800
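        /* The gphy config straddles the two words read above: take the
         * low 16 bits of the first word (top half) and the high 16 bits
         * of the second (bottom half).
         */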
13801         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13802 }
13803
13804 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13805 {
13806         u32 adv = ADVERTISED_Autoneg;
13807
13808         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13809                 adv |= ADVERTISED_1000baseT_Half |
13810                        ADVERTISED_1000baseT_Full;
13811
13812         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13813                 adv |= ADVERTISED_100baseT_Half |
13814                        ADVERTISED_100baseT_Full |
13815                        ADVERTISED_10baseT_Half |
13816                        ADVERTISED_10baseT_Full |
13817                        ADVERTISED_TP;
13818         else
13819                 adv |= ADVERTISED_FIBRE;
13820
13821         tp->link_config.advertising = adv;
13822         tp->link_config.speed = SPEED_UNKNOWN;
13823         tp->link_config.duplex = DUPLEX_UNKNOWN;
13824         tp->link_config.autoneg = AUTONEG_ENABLE;
13825         tp->link_config.active_speed = SPEED_UNKNOWN;
13826         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13827
13828         tp->old_link = -1;
13829 }
13830
13831 static int __devinit tg3_phy_probe(struct tg3 *tp)
13832 {
13833         u32 hw_phy_id_1, hw_phy_id_2;
13834         u32 hw_phy_id, hw_phy_id_masked;
13835         int err;
13836
13837         /* flow control autonegotiation is default behavior */
13838         tg3_flag_set(tp, PAUSE_AUTONEG);
13839         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13840
13841         if (tg3_flag(tp, ENABLE_APE)) {
13842                 switch (tp->pci_fn) {
13843                 case 0:
13844                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
13845                         break;
13846                 case 1:
13847                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
13848                         break;
13849                 case 2:
13850                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
13851                         break;
13852                 case 3:
13853                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
13854                         break;
13855                 }
13856         }
13857
13858         if (tg3_flag(tp, USE_PHYLIB))
13859                 return tg3_phy_init(tp);
13860
13861         /* Reading the PHY ID register can conflict with ASF
13862          * firmware access to the PHY hardware.
13863          */
13864         err = 0;
13865         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13866                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13867         } else {
13868                 /* Now read the physical PHY_ID from the chip and verify
13869                  * that it is sane.  If it doesn't look good, we fall
13870                  * back to the PHY_ID found in the eeprom area, and
13871                  * failing that to the hard-coded subsystem-ID table.
13872                  */
13873                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13874                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13875
13876                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13877                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13878                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13879
13880                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13881         }
13882
13883         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13884                 tp->phy_id = hw_phy_id;
13885                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13886                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13887                 else
13888                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13889         } else {
13890                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13891                         /* Do nothing, phy ID already set up in
13892                          * tg3_get_eeprom_hw_cfg().
13893                          */
13894                 } else {
13895                         struct subsys_tbl_ent *p;
13896
13897                         /* No eeprom signature?  Try the hardcoded
13898                          * subsys device table.
13899                          */
13900                         p = tg3_lookup_by_subsys(tp);
13901                         if (!p)
13902                                 return -ENODEV;
13903
13904                         tp->phy_id = p->phy_id;
13905                         if (!tp->phy_id ||
13906                             tp->phy_id == TG3_PHY_ID_BCM8002)
13907                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13908                 }
13909         }
13910
13911         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13912             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13913              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13914              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13915               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13916              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13917               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13918                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13919
13920         tg3_phy_init_link_config(tp);
13921
13922         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13923             !tg3_flag(tp, ENABLE_APE) &&
13924             !tg3_flag(tp, ENABLE_ASF)) {
13925                 u32 bmsr, dummy;
13926
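                      /* The BMSR link status bit is latched low, so read
                       * the register twice; the second read reflects the
                       * current link state.
                       */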
13927                 tg3_readphy(tp, MII_BMSR, &bmsr);
13928                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13929                     (bmsr & BMSR_LSTATUS))
13930                         goto skip_phy_reset;
13931
13932                 err = tg3_phy_reset(tp);
13933                 if (err)
13934                         return err;
13935
13936                 tg3_phy_set_wirespeed(tp);
13937
13938                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13939                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13940                                             tp->link_config.flowctrl);
13941
13942                         tg3_writephy(tp, MII_BMCR,
13943                                      BMCR_ANENABLE | BMCR_ANRESTART);
13944                 }
13945         }
13946
13947 skip_phy_reset:
13948         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13949                 err = tg3_init_5401phy_dsp(tp);
13950                 if (err)
13951                         return err;
13952
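                      /* Run the 5401 DSP setup a second time; the result
                       * of this second pass is what gets returned below.
                       */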
13953                 err = tg3_init_5401phy_dsp(tp);
13954         }
13955
13956         return err;
13957 }
13958
13959 static void __devinit tg3_read_vpd(struct tg3 *tp)
13960 {
13961         u8 *vpd_data;
13962         unsigned int block_end, rosize, len;
13963         u32 vpdlen;
13964         int j, i = 0;
13965
13966         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13967         if (!vpd_data)
13968                 goto out_no_vpd;
13969
13970         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13971         if (i < 0)
13972                 goto out_not_found;
13973
13974         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13975         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13976         i += PCI_VPD_LRDT_TAG_SIZE;
13977
13978         if (block_end > vpdlen)
13979                 goto out_not_found;
13980
13981         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13982                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13983         if (j > 0) {
13984                 len = pci_vpd_info_field_size(&vpd_data[j]);
13985
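                      /* An MFR_ID of "1028" is Dell's PCI vendor ID in
                       * ASCII; on those boards the V0 keyword below holds
                       * a firmware version string.
                       */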
13986                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13987                 if (j + len > block_end || len != 4 ||
13988                     memcmp(&vpd_data[j], "1028", 4))
13989                         goto partno;
13990
13991                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13992                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13993                 if (j < 0)
13994                         goto partno;
13995
13996                 len = pci_vpd_info_field_size(&vpd_data[j]);
13997
13998                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13999                 if (j + len > block_end)
14000                         goto partno;
14001
14002                 memcpy(tp->fw_ver, &vpd_data[j], len);
14003                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14004         }
14005
14006 partno:
14007         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14008                                       PCI_VPD_RO_KEYWORD_PARTNO);
14009         if (i < 0)
14010                 goto out_not_found;
14011
14012         len = pci_vpd_info_field_size(&vpd_data[i]);
14013
14014         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14015         if (len > TG3_BPN_SIZE ||
14016             (len + i) > vpdlen)
14017                 goto out_not_found;
14018
14019         memcpy(tp->board_part_number, &vpd_data[i], len);
14020
14021 out_not_found:
14022         kfree(vpd_data);
14023         if (tp->board_part_number[0])
14024                 return;
14025
14026 out_no_vpd:
14027         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14028                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14029                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14030                         strcpy(tp->board_part_number, "BCM5717");
14031                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14032                         strcpy(tp->board_part_number, "BCM5718");
14033                 else
14034                         goto nomatch;
14035         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14036                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14037                         strcpy(tp->board_part_number, "BCM57780");
14038                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14039                         strcpy(tp->board_part_number, "BCM57760");
14040                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14041                         strcpy(tp->board_part_number, "BCM57790");
14042                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14043                         strcpy(tp->board_part_number, "BCM57788");
14044                 else
14045                         goto nomatch;
14046         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14047                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14048                         strcpy(tp->board_part_number, "BCM57761");
14049                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14050                         strcpy(tp->board_part_number, "BCM57765");
14051                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14052                         strcpy(tp->board_part_number, "BCM57781");
14053                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14054                         strcpy(tp->board_part_number, "BCM57785");
14055                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14056                         strcpy(tp->board_part_number, "BCM57791");
14057                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14058                         strcpy(tp->board_part_number, "BCM57795");
14059                 else
14060                         goto nomatch;
14061         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14062                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14063                         strcpy(tp->board_part_number, "BCM57762");
14064                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14065                         strcpy(tp->board_part_number, "BCM57766");
14066                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14067                         strcpy(tp->board_part_number, "BCM57782");
14068                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14069                         strcpy(tp->board_part_number, "BCM57786");
14070                 else
14071                         goto nomatch;
14072         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14073                 strcpy(tp->board_part_number, "BCM95906");
14074         } else {
14075 nomatch:
14076                 strcpy(tp->board_part_number, "none");
14077         }
14078 }
14079
14080 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14081 {
14082         u32 val;
14083
14084         if (tg3_nvram_read(tp, offset, &val) ||
14085             (val & 0xfc000000) != 0x0c000000 ||
14086             tg3_nvram_read(tp, offset + 4, &val) ||
14087             val != 0)
14088                 return 0;
14089
14090         return 1;
14091 }
14092
14093 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
14094 {
14095         u32 val, offset, start, ver_offset;
14096         int i, dst_off;
14097         bool newver = false;
14098
14099         if (tg3_nvram_read(tp, 0xc, &offset) ||
14100             tg3_nvram_read(tp, 0x4, &start))
14101                 return;
14102
14103         offset = tg3_nvram_logical_addr(tp, offset);
14104
14105         if (tg3_nvram_read(tp, offset, &val))
14106                 return;
14107
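              /* This is the same signature test performed by
               * tg3_fw_img_is_valid(); a match indicates the newer
               * bootcode version format.
               */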
14108         if ((val & 0xfc000000) == 0x0c000000) {
14109                 if (tg3_nvram_read(tp, offset + 4, &val))
14110                         return;
14111
14112                 if (val == 0)
14113                         newver = true;
14114         }
14115
14116         dst_off = strlen(tp->fw_ver);
14117
14118         if (newver) {
14119                 if (TG3_VER_SIZE - dst_off < 16 ||
14120                     tg3_nvram_read(tp, offset + 8, &ver_offset))
14121                         return;
14122
14123                 offset = offset + ver_offset - start;
14124                 for (i = 0; i < 16; i += 4) {
14125                         __be32 v;
14126                         if (tg3_nvram_read_be32(tp, offset + i, &v))
14127                                 return;
14128
14129                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14130                 }
14131         } else {
14132                 u32 major, minor;
14133
14134                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14135                         return;
14136
14137                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14138                         TG3_NVM_BCVER_MAJSFT;
14139                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14140                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14141                          "v%d.%02d", major, minor);
14142         }
14143 }
14144
14145 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
14146 {
14147         u32 val, major, minor;
14148
14149         /* Use native endian representation */
14150         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14151                 return;
14152
14153         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14154                 TG3_NVM_HWSB_CFG1_MAJSFT;
14155         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14156                 TG3_NVM_HWSB_CFG1_MINSFT;
14157
14158         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14159 }
14160
14161 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
14162 {
14163         u32 offset, major, minor, build;
14164
14165         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14166
14167         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14168                 return;
14169
14170         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14171         case TG3_EEPROM_SB_REVISION_0:
14172                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14173                 break;
14174         case TG3_EEPROM_SB_REVISION_2:
14175                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14176                 break;
14177         case TG3_EEPROM_SB_REVISION_3:
14178                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14179                 break;
14180         case TG3_EEPROM_SB_REVISION_4:
14181                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14182                 break;
14183         case TG3_EEPROM_SB_REVISION_5:
14184                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14185                 break;
14186         case TG3_EEPROM_SB_REVISION_6:
14187                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14188                 break;
14189         default:
14190                 return;
14191         }
14192
14193         if (tg3_nvram_read(tp, offset, &val))
14194                 return;
14195
14196         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14197                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14198         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14199                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14200         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14201
14202         if (minor > 99 || build > 26)
14203                 return;
14204
14205         offset = strlen(tp->fw_ver);
14206         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14207                  " v%d.%02d", major, minor);
14208
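              /* Build numbers 1-26 are encoded as a trailing 'a'-'z'. */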
14209         if (build > 0) {
14210                 offset = strlen(tp->fw_ver);
14211                 if (offset < TG3_VER_SIZE - 1)
14212                         tp->fw_ver[offset] = 'a' + build - 1;
14213         }
14214 }
14215
14216 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
14217 {
14218         u32 val, offset, start;
14219         int i, vlen;
14220
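              /* Walk the NVRAM directory looking for an ASF init-code
               * entry (TG3_NVM_DIRTYPE_ASFINI).
               */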
14221         for (offset = TG3_NVM_DIR_START;
14222              offset < TG3_NVM_DIR_END;
14223              offset += TG3_NVM_DIRENT_SIZE) {
14224                 if (tg3_nvram_read(tp, offset, &val))
14225                         return;
14226
14227                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14228                         break;
14229         }
14230
14231         if (offset == TG3_NVM_DIR_END)
14232                 return;
14233
14234         if (!tg3_flag(tp, 5705_PLUS))
14235                 start = 0x08000000;
14236         else if (tg3_nvram_read(tp, offset - 4, &start))
14237                 return;
14238
14239         if (tg3_nvram_read(tp, offset + 4, &offset) ||
14240             !tg3_fw_img_is_valid(tp, offset) ||
14241             tg3_nvram_read(tp, offset + 8, &val))
14242                 return;
14243
14244         offset += val - start;
14245
14246         vlen = strlen(tp->fw_ver);
14247
14248         tp->fw_ver[vlen++] = ',';
14249         tp->fw_ver[vlen++] = ' ';
14250
14251         for (i = 0; i < 4; i++) {
14252                 __be32 v;
14253                 if (tg3_nvram_read_be32(tp, offset, &v))
14254                         return;
14255
14256                 offset += sizeof(v);
14257
14258                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14259                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14260                         break;
14261                 }
14262
14263                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14264                 vlen += sizeof(v);
14265         }
14266 }
14267
14268 static void __devinit tg3_probe_ncsi(struct tg3 *tp)
14269 {
14270         u32 apedata;
14271
14272         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14273         if (apedata != APE_SEG_SIG_MAGIC)
14274                 return;
14275
14276         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14277         if (!(apedata & APE_FW_STATUS_READY))
14278                 return;
14279
14280         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14281                 tg3_flag_set(tp, APE_HAS_NCSI);
14282 }
14283
14284 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14285 {
14286         int vlen;
14287         u32 apedata;
14288         char *fwtype;
14289
14290         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14291
14292         if (tg3_flag(tp, APE_HAS_NCSI))
14293                 fwtype = "NCSI";
14294         else
14295                 fwtype = "DASH";
14296
14297         vlen = strlen(tp->fw_ver);
14298
14299         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14300                  fwtype,
14301                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14302                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14303                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14304                  (apedata & APE_FW_VERSION_BLDMSK));
14305 }
14306
14307 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
14308 {
14309         u32 val;
14310         bool vpd_vers = false;
14311
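              /* A non-empty fw_ver here means tg3_read_vpd() has already
               * placed a version string in it.
               */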
14312         if (tp->fw_ver[0] != 0)
14313                 vpd_vers = true;
14314
14315         if (tg3_flag(tp, NO_NVRAM)) {
14316                 strcat(tp->fw_ver, "sb");
14317                 return;
14318         }
14319
14320         if (tg3_nvram_read(tp, 0, &val))
14321                 return;
14322
14323         if (val == TG3_EEPROM_MAGIC)
14324                 tg3_read_bc_ver(tp);
14325         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14326                 tg3_read_sb_ver(tp, val);
14327         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14328                 tg3_read_hwsb_ver(tp);
14329
14330         if (tg3_flag(tp, ENABLE_ASF)) {
14331                 if (tg3_flag(tp, ENABLE_APE)) {
14332                         tg3_probe_ncsi(tp);
14333                         if (!vpd_vers)
14334                                 tg3_read_dash_ver(tp);
14335                 } else if (!vpd_vers) {
14336                         tg3_read_mgmtfw_ver(tp);
14337                 }
14338         }
14339
14340         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14341 }
14342
14343 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14344 {
14345         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14346                 return TG3_RX_RET_MAX_SIZE_5717;
14347         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14348                 return TG3_RX_RET_MAX_SIZE_5700;
14349         else
14350                 return TG3_RX_RET_MAX_SIZE_5705;
14351 }
14352
14353 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14354         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14355         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14356         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14357         { },
14358 };
14359
14360 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14361 {
14362         struct pci_dev *peer;
14363         unsigned int func, devnr = tp->pdev->devfn & ~7;
14364
14365         for (func = 0; func < 8; func++) {
14366                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14367                 if (peer && peer != tp->pdev)
14368                         break;
14369                 pci_dev_put(peer);
14370         }
14371         /* 5704 can be configured in single-port mode; set peer to
14372          * tp->pdev in that case.
14373          */
14374         if (!peer) {
14375                 peer = tp->pdev;
14376                 return peer;
14377         }
14378
14379         /*
14380          * We don't need to keep the refcount elevated; there's no way
14381          * to remove one half of this device without removing the other
14382          */
14383         pci_dev_put(peer);
14384
14385         return peer;
14386 }
14387
14388 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14389 {
14390         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14392                 u32 reg;
14393
14394                 /* All devices that use the alternate
14395                  * ASIC REV location have a CPMU.
14396                  */
14397                 tg3_flag_set(tp, CPMU_PRESENT);
14398
14399                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14400                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
14401                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14402                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14403                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14404                         reg = TG3PCI_GEN2_PRODID_ASICREV;
14405                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14406                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14407                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14408                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14409                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14410                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14411                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14412                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14413                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14414                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14415                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14416                 else
14417                         reg = TG3PCI_PRODID_ASICREV;
14418
14419                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14420         }
14421
14422         /* Wrong chip ID in 5752 A0. This code can be removed later
14423          * as A0 is not in production.
14424          */
14425         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14426                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14427
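              /* A 5717 C0 is handled as a 5720 A0 throughout the driver. */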
14428         if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
14429                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
14430
14431         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14432             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14433             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14434                 tg3_flag_set(tp, 5717_PLUS);
14435
14436         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14437             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14438                 tg3_flag_set(tp, 57765_CLASS);
14439
14440         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14441                 tg3_flag_set(tp, 57765_PLUS);
14442
14443         /* Intentionally exclude ASIC_REV_5906 */
14444         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14445             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14446             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14447             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14448             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14449             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14450             tg3_flag(tp, 57765_PLUS))
14451                 tg3_flag_set(tp, 5755_PLUS);
14452
14453         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14454             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14455                 tg3_flag_set(tp, 5780_CLASS);
14456
14457         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14458             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14459             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14460             tg3_flag(tp, 5755_PLUS) ||
14461             tg3_flag(tp, 5780_CLASS))
14462                 tg3_flag_set(tp, 5750_PLUS);
14463
14464         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14465             tg3_flag(tp, 5750_PLUS))
14466                 tg3_flag_set(tp, 5705_PLUS);
14467 }
14468
14469 static int __devinit tg3_get_invariants(struct tg3 *tp)
14470 {
14471         u32 misc_ctrl_reg;
14472         u32 pci_state_reg, grc_misc_cfg;
14473         u32 val;
14474         u16 pci_cmd;
14475         int err;
14476
14477         /* Force memory write invalidate off.  If we leave it on,
14478          * then on 5700_BX chips we have to enable a workaround.
14479          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14480          * to match the cacheline size.  The Broadcom driver has this
14481          * workaround but turns MWI off all the time and so never uses
14482          * it.  This seems to suggest that the workaround is insufficient.
14483          */
14484         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14485         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14486         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14487
14488         /* Important! -- Make sure register accesses are byteswapped
14489          * correctly.  Also, for those chips that require it, make
14490          * sure that indirect register accesses are enabled before
14491          * the first operation.
14492          */
14493         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14494                               &misc_ctrl_reg);
14495         tp->misc_host_ctrl |= (misc_ctrl_reg &
14496                                MISC_HOST_CTRL_CHIPREV);
14497         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14498                                tp->misc_host_ctrl);
14499
14500         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14501
14502         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14503          * we need to disable memory and use config. cycles
14504          * only to access all registers. The 5702/03 chips
14505          * can mistakenly decode the special cycles from the
14506          * ICH chipsets as memory write cycles, causing corruption
14507          * of register and memory space. Only certain ICH bridges
14508          * will drive special cycles with non-zero data during the
14509          * address phase which can fall within the 5703's address
14510          * range. This is not an ICH bug as the PCI spec allows
14511          * non-zero address during special cycles. However, only
14512          * these ICH bridges are known to drive non-zero addresses
14513          * during special cycles.
14514          *
14515          * Since special cycles do not cross PCI bridges, we only
14516          * enable this workaround if the 5703 is on the secondary
14517          * bus of these ICH bridges.
14518          */
14519         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14520             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14521                 static struct tg3_dev_id {
14522                         u32     vendor;
14523                         u32     device;
14524                         u32     rev;
14525                 } ich_chipsets[] = {
14526                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14527                           PCI_ANY_ID },
14528                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14529                           PCI_ANY_ID },
14530                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14531                           0xa },
14532                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14533                           PCI_ANY_ID },
14534                         { },
14535                 };
14536                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14537                 struct pci_dev *bridge = NULL;
14538
14539                 while (pci_id->vendor != 0) {
14540                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14541                                                 bridge);
14542                         if (!bridge) {
14543                                 pci_id++;
14544                                 continue;
14545                         }
14546                         if (pci_id->rev != PCI_ANY_ID) {
14547                                 if (bridge->revision > pci_id->rev)
14548                                         continue;
14549                         }
14550                         if (bridge->subordinate &&
14551                             (bridge->subordinate->number ==
14552                              tp->pdev->bus->number)) {
14553                                 tg3_flag_set(tp, ICH_WORKAROUND);
14554                                 pci_dev_put(bridge);
14555                                 break;
14556                         }
14557                 }
14558         }
14559
14560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14561                 static struct tg3_dev_id {
14562                         u32     vendor;
14563                         u32     device;
14564                 } bridge_chipsets[] = {
14565                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14566                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14567                         { },
14568                 };
14569                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14570                 struct pci_dev *bridge = NULL;
14571
14572                 while (pci_id->vendor != 0) {
14573                         bridge = pci_get_device(pci_id->vendor,
14574                                                 pci_id->device,
14575                                                 bridge);
14576                         if (!bridge) {
14577                                 pci_id++;
14578                                 continue;
14579                         }
14580                         if (bridge->subordinate &&
14581                             (bridge->subordinate->number <=
14582                              tp->pdev->bus->number) &&
14583                             (bridge->subordinate->busn_res.end >=
14584                              tp->pdev->bus->number)) {
14585                                 tg3_flag_set(tp, 5701_DMA_BUG);
14586                                 pci_dev_put(bridge);
14587                                 break;
14588                         }
14589                 }
14590         }
14591
14592         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14593          * DMA addresses > 40-bit. This bridge may have other additional
14594          * 57xx devices behind it in some 4-port NIC designs for example.
14595          * Any tg3 device found behind the bridge will also need the 40-bit
14596          * DMA workaround.
14597          */
14598         if (tg3_flag(tp, 5780_CLASS)) {
14599                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14600                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14601         } else {
14602                 struct pci_dev *bridge = NULL;
14603
14604                 do {
14605                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14606                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14607                                                 bridge);
14608                         if (bridge && bridge->subordinate &&
14609                             (bridge->subordinate->number <=
14610                              tp->pdev->bus->number) &&
14611                             (bridge->subordinate->busn_res.end >=
14612                              tp->pdev->bus->number)) {
14613                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14614                                 pci_dev_put(bridge);
14615                                 break;
14616                         }
14617                 } while (bridge);
14618         }
14619
14620         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14621             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14622                 tp->pdev_peer = tg3_find_peer(tp);
14623
14624         /* Determine TSO capabilities */
14625         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14626                 ; /* Do nothing. HW bug. */
14627         else if (tg3_flag(tp, 57765_PLUS))
14628                 tg3_flag_set(tp, HW_TSO_3);
14629         else if (tg3_flag(tp, 5755_PLUS) ||
14630                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14631                 tg3_flag_set(tp, HW_TSO_2);
14632         else if (tg3_flag(tp, 5750_PLUS)) {
14633                 tg3_flag_set(tp, HW_TSO_1);
14634                 tg3_flag_set(tp, TSO_BUG);
14635                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14636                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14637                         tg3_flag_clear(tp, TSO_BUG);
14638         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14639                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14640                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14641                 tg3_flag_set(tp, TSO_BUG);
14642                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14643                         tp->fw_needed = FIRMWARE_TG3TSO5;
14644                 else
14645                         tp->fw_needed = FIRMWARE_TG3TSO;
14646         }
14647
14648         /* Selectively allow TSO based on operating conditions */
14649         if (tg3_flag(tp, HW_TSO_1) ||
14650             tg3_flag(tp, HW_TSO_2) ||
14651             tg3_flag(tp, HW_TSO_3) ||
14652             tp->fw_needed) {
14653                 /* For firmware TSO, assume ASF is disabled.
14654                  * We'll disable TSO later if we discover ASF
14655                  * is enabled in tg3_get_eeprom_hw_cfg().
14656                  */
14657                 tg3_flag_set(tp, TSO_CAPABLE);
14658         } else {
14659                 tg3_flag_clear(tp, TSO_CAPABLE);
14660                 tg3_flag_clear(tp, TSO_BUG);
14661                 tp->fw_needed = NULL;
14662         }
14663
14664         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14665                 tp->fw_needed = FIRMWARE_TG3;
14666
14667         tp->irq_max = 1;
14668
14669         if (tg3_flag(tp, 5750_PLUS)) {
14670                 tg3_flag_set(tp, SUPPORT_MSI);
14671                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14672                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14673                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14674                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14675                      tp->pdev_peer == tp->pdev))
14676                         tg3_flag_clear(tp, SUPPORT_MSI);
14677
14678                 if (tg3_flag(tp, 5755_PLUS) ||
14679                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14680                         tg3_flag_set(tp, 1SHOT_MSI);
14681                 }
14682
14683                 if (tg3_flag(tp, 57765_PLUS)) {
14684                         tg3_flag_set(tp, SUPPORT_MSIX);
14685                         tp->irq_max = TG3_IRQ_MAX_VECS;
14686                 }
14687         }
14688
14689         tp->txq_max = 1;
14690         tp->rxq_max = 1;
14691         if (tp->irq_max > 1) {
14692                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
14693                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
14694
14695                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14696                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14697                         tp->txq_max = tp->irq_max - 1;
14698         }
14699
14700         if (tg3_flag(tp, 5755_PLUS) ||
14701             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14702                 tg3_flag_set(tp, SHORT_DMA_BUG);
14703
14704         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14705                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14706
14707         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14708             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14709             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14710                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14711
14712         if (tg3_flag(tp, 57765_PLUS) &&
14713             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14714                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14715
14716         if (!tg3_flag(tp, 5705_PLUS) ||
14717             tg3_flag(tp, 5780_CLASS) ||
14718             tg3_flag(tp, USE_JUMBO_BDFLAG))
14719                 tg3_flag_set(tp, JUMBO_CAPABLE);
14720
14721         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14722                               &pci_state_reg);
14723
14724         if (pci_is_pcie(tp->pdev)) {
14725                 u16 lnkctl;
14726
14727                 tg3_flag_set(tp, PCI_EXPRESS);
14728
14729                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
14730                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14731                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14732                             ASIC_REV_5906) {
14733                                 tg3_flag_clear(tp, HW_TSO_2);
14734                                 tg3_flag_clear(tp, TSO_CAPABLE);
14735                         }
14736                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14737                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14738                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14739                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14740                                 tg3_flag_set(tp, CLKREQ_BUG);
14741                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14742                         tg3_flag_set(tp, L1PLLPD_EN);
14743                 }
14744         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14745                 /* BCM5785 devices are effectively PCIe devices, and should
14746                  * follow PCIe codepaths, but do not have a PCIe capabilities
14747                  * section.
14748                  */
14749                 tg3_flag_set(tp, PCI_EXPRESS);
14750         } else if (!tg3_flag(tp, 5705_PLUS) ||
14751                    tg3_flag(tp, 5780_CLASS)) {
14752                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14753                 if (!tp->pcix_cap) {
14754                         dev_err(&tp->pdev->dev,
14755                                 "Cannot find PCI-X capability, aborting\n");
14756                         return -EIO;
14757                 }
14758
14759                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14760                         tg3_flag_set(tp, PCIX_MODE);
14761         }
14762
14763         /* If we have an AMD 762 or VIA K8T800 chipset, reordering
14764          * of writes to the mailbox registers by the host controller
14765          * can cause major trouble.  We read back after every mailbox
14766          * register write to force the writes to be posted to the
14767          * chip in order.
14768          */
14769         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14770             !tg3_flag(tp, PCI_EXPRESS))
14771                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14772
14773         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14774                              &tp->pci_cacheline_sz);
14775         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14776                              &tp->pci_lat_timer);
14777         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14778             tp->pci_lat_timer < 64) {
14779                 tp->pci_lat_timer = 64;
14780                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14781                                       tp->pci_lat_timer);
14782         }
14783
14784         /* Important! -- It is critical that the PCI-X hw workaround
14785          * situation is decided before the first MMIO register access.
14786          */
14787         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14788                 /* 5700 BX chips need to have their TX producer index
14789                  * mailboxes written twice to work around a bug.
14790                  */
14791                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14792
14793                 /* If we are in PCI-X mode, enable the register write workaround.
14794                  *
14795                  * The workaround is to use indirect register accesses
14796                  * for all chip writes except those to mailbox registers.
14797                  */
14798                 if (tg3_flag(tp, PCIX_MODE)) {
14799                         u32 pm_reg;
14800
14801                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14802
14803                         /* The chip can have its power management PCI config
14804                          * space registers clobbered due to this bug.
14805                          * So explicitly force the chip into D0 here.
14806                          */
14807                         pci_read_config_dword(tp->pdev,
14808                                               tp->pm_cap + PCI_PM_CTRL,
14809                                               &pm_reg);
14810                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14811                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14812                         pci_write_config_dword(tp->pdev,
14813                                                tp->pm_cap + PCI_PM_CTRL,
14814                                                pm_reg);
14815
14816                         /* Also, force SERR#/PERR# in PCI command. */
14817                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14818                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14819                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14820                 }
14821         }
14822
14823         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14824                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14825         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14826                 tg3_flag_set(tp, PCI_32BIT);
14827
14828         /* Chip-specific fixup from Broadcom driver */
14829         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14830             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14831                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14832                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14833         }
14834
14835         /* Default fast path register access methods */
14836         tp->read32 = tg3_read32;
14837         tp->write32 = tg3_write32;
14838         tp->read32_mbox = tg3_read32;
14839         tp->write32_mbox = tg3_write32;
14840         tp->write32_tx_mbox = tg3_write32;
14841         tp->write32_rx_mbox = tg3_write32;
14842
14843         /* Various workaround register access methods */
14844         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14845                 tp->write32 = tg3_write_indirect_reg32;
14846         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14847                  (tg3_flag(tp, PCI_EXPRESS) &&
14848                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14849                 /*
14850                  * Back-to-back register writes can cause problems on these
14851                  * chips; the workaround is to read back all register writes
14852                  * except those to mailbox registers.
14853                  *
14854                  * See tg3_write_indirect_reg32().
14855                  */
14856                 tp->write32 = tg3_write_flush_reg32;
14857         }
14858
14859         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14860                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14861                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14862                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14863         }
14864
14865         if (tg3_flag(tp, ICH_WORKAROUND)) {
14866                 tp->read32 = tg3_read_indirect_reg32;
14867                 tp->write32 = tg3_write_indirect_reg32;
14868                 tp->read32_mbox = tg3_read_indirect_mbox;
14869                 tp->write32_mbox = tg3_write_indirect_mbox;
14870                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14871                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14872
14873                 iounmap(tp->regs);
14874                 tp->regs = NULL;
14875
14876                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14877                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14878                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14879         }
14880         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14881                 tp->read32_mbox = tg3_read32_mbox_5906;
14882                 tp->write32_mbox = tg3_write32_mbox_5906;
14883                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14884                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14885         }
14886
14887         if (tp->write32 == tg3_write_indirect_reg32 ||
14888             (tg3_flag(tp, PCIX_MODE) &&
14889              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14890               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14891                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14892
14893         /* The memory arbiter has to be enabled in order for SRAM accesses
14894          * to succeed.  Normally on powerup the tg3 chip firmware will make
14895          * sure it is enabled, but other entities such as system netboot
14896          * code might disable it.
14897          */
14898         val = tr32(MEMARB_MODE);
14899         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14900
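              /* Determine the PCI function number.  On some chips the
               * function number in devfn may not be authoritative, so it
               * is re-read from the PCI-X status register or the CPMU
               * status block below.
               */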
14901         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14902         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14903             tg3_flag(tp, 5780_CLASS)) {
14904                 if (tg3_flag(tp, PCIX_MODE)) {
14905                         pci_read_config_dword(tp->pdev,
14906                                               tp->pcix_cap + PCI_X_STATUS,
14907                                               &val);
14908                         tp->pci_fn = val & 0x7;
14909                 }
14910         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14911                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14912                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14913                     NIC_SRAM_CPMUSTAT_SIG) {
14914                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14915                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14916                 }
14917         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14918                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14919                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14920                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14921                     NIC_SRAM_CPMUSTAT_SIG) {
14922                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14923                                      TG3_CPMU_STATUS_FSHFT_5719;
14924                 }
14925         }
14926
14927         /* Get eeprom hw config before calling tg3_set_power_state().
14928          * In particular, the TG3_FLAG_IS_NIC flag must be
14929          * determined before calling tg3_set_power_state() so that
14930          * we know whether or not to switch out of Vaux power.
14931          * When the flag is set, it means that GPIO1 is used for eeprom
14932          * write protect and also implies that it is a LOM where GPIOs
14933          * are not used to switch power.
14934          */
14935         tg3_get_eeprom_hw_cfg(tp);
14936
14937         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14938                 tg3_flag_clear(tp, TSO_CAPABLE);
14939                 tg3_flag_clear(tp, TSO_BUG);
14940                 tp->fw_needed = NULL;
14941         }
14942
14943         if (tg3_flag(tp, ENABLE_APE)) {
14944                 /* Allow reads and writes to the
14945                  * APE register and memory space.
14946                  */
14947                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14948                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14949                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14950                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14951                                        pci_state_reg);
14952
14953                 tg3_ape_lock_init(tp);
14954         }
14955
14956         /* Set up tp->grc_local_ctrl before calling
14957          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14958          * will bring 5700's external PHY out of reset.
14959          * It is also used as eeprom write protect on LOMs.
14960          */
14961         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14962         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14963             tg3_flag(tp, EEPROM_WRITE_PROT))
14964                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14965                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14966         /* Unused GPIO3 must be driven as output on 5752 because there
14967          * are no pull-up resistors on unused GPIO pins.
14968          */
14969         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14970                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14971
14972         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14973             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14974             tg3_flag(tp, 57765_CLASS))
14975                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14976
14977         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14978             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14979                 /* Turn off the debug UART. */
14980                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14981                 if (tg3_flag(tp, IS_NIC))
14982                         /* Keep VMain power. */
14983                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14984                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14985         }
14986
14987         /* Switch out of Vaux if it is a NIC */
14988         tg3_pwrsrc_switch_to_vmain(tp);
14989
14990         /* Derive initial jumbo mode from MTU assigned in
14991          * ether_setup() via the alloc_etherdev() call
14992          */
14993         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14994                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14995
14996         /* Determine WakeOnLan speed to use. */
14997         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14998             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14999             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15000             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
15001                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15002         } else {
15003                 tg3_flag_set(tp, WOL_SPEED_100MB);
15004         }
15005
15006         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15007                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15008
15009         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
15010         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15011             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15012              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15013              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15014             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15015             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15016                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15017
15018         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15019             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15020                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15021         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15022                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15023
15024         if (tg3_flag(tp, 5705_PLUS) &&
15025             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15026             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15027             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15028             !tg3_flag(tp, 57765_PLUS)) {
15029                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15030                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15031                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15032                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15033                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15034                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15035                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15036                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15037                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15038                 } else
15039                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15040         }
15041
15042         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15043             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15044                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15045                 if (tp->phy_otp == 0)
15046                         tp->phy_otp = TG3_OTP_DEFAULT;
15047         }
15048
15049         if (tg3_flag(tp, CPMU_PRESENT))
15050                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15051         else
15052                 tp->mi_mode = MAC_MI_MODE_BASE;
15053
15054         tp->coalesce_mode = 0;
15055         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15056             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15057                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15058
15059         /* Set these bits to enable statistics workaround. */
15060         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15061             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15062             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15063                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15064                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15065         }
15066
15067         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15068             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15069                 tg3_flag_set(tp, USE_PHYLIB);
15070
15071         err = tg3_mdio_init(tp);
15072         if (err)
15073                 return err;
15074
15075         /* Initialize data/descriptor byte/word swapping. */
15076         val = tr32(GRC_MODE);
15077         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15078                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15079                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15080                         GRC_MODE_B2HRX_ENABLE |
15081                         GRC_MODE_HTX2B_ENABLE |
15082                         GRC_MODE_HOST_STACKUP);
15083         else
15084                 val &= GRC_MODE_HOST_STACKUP;
15085
15086         tw32(GRC_MODE, val | tp->grc_mode);
15087
15088         tg3_switch_clocks(tp);
15089
15090         /* Clear this out for sanity. */
15091         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15092
15093         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15094                               &pci_state_reg);
15095         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15096             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15097                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15098
15099                 if (chiprevid == CHIPREV_ID_5701_A0 ||
15100                     chiprevid == CHIPREV_ID_5701_B0 ||
15101                     chiprevid == CHIPREV_ID_5701_B2 ||
15102                     chiprevid == CHIPREV_ID_5701_B5) {
15103                         void __iomem *sram_base;
15104
15105                         /* Write some dummy words into the SRAM status block
15106                          * area and see if they read back correctly.  If the
15107                          * readback is bad, force-enable the PCI-X workaround.
15108                          */
15109                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15110
15111                         writel(0x00000000, sram_base);
15112                         writel(0x00000000, sram_base + 4);
15113                         writel(0xffffffff, sram_base + 4);
15114                         if (readl(sram_base) != 0x00000000)
15115                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15116                 }
15117         }
15118
15119         udelay(50);
15120         tg3_nvram_init(tp);
15121
15122         grc_misc_cfg = tr32(GRC_MISC_CFG);
15123         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15124
15125         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15126             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15127              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15128                 tg3_flag_set(tp, IS_5788);
15129
15130         if (!tg3_flag(tp, IS_5788) &&
15131             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15132                 tg3_flag_set(tp, TAGGED_STATUS);
15133         if (tg3_flag(tp, TAGGED_STATUS)) {
15134                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15135                                       HOSTCC_MODE_CLRTICK_TXBD);
15136
15137                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15138                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15139                                        tp->misc_host_ctrl);
15140         }
15141
15142         /* Preserve the APE MAC_MODE bits */
15143         if (tg3_flag(tp, ENABLE_APE))
15144                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15145         else
15146                 tp->mac_mode = 0;
15147
15148         /* these are limited to 10/100 only */
15149         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15150              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15151             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15152              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15153              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
15154               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
15155               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
15156             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15157              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
15158               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
15159               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
15160             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
15161             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15162             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15163             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15164                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15165
15166         err = tg3_phy_probe(tp);
15167         if (err) {
15168                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15169                 /* ... but do not return immediately ... */
15170                 tg3_mdio_fini(tp);
15171         }
15172
15173         tg3_read_vpd(tp);
15174         tg3_read_fw_ver(tp);
15175
15176         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15177                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15178         } else {
15179                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15180                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15181                 else
15182                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15183         }
15184
15185         /* 5700 {AX,BX} chips have a broken status block link
15186          * change bit implementation, so we must use the
15187          * status register in those cases.
15188          */
15189         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15190                 tg3_flag_set(tp, USE_LINKCHG_REG);
15191         else
15192                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15193
15194         /* The led_ctrl is set during tg3_phy_probe; here we might
15195          * have to force the link status polling mechanism based
15196          * upon subsystem IDs.
15197          */
15198         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15199             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15200             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15201                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15202                 tg3_flag_set(tp, USE_LINKCHG_REG);
15203         }
15204
15205         /* For all SERDES we poll the MAC status register. */
15206         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15207                 tg3_flag_set(tp, POLL_SERDES);
15208         else
15209                 tg3_flag_clear(tp, POLL_SERDES);
15210
15211         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15212         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15213         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15214             tg3_flag(tp, PCIX_MODE)) {
15215                 tp->rx_offset = NET_SKB_PAD;
15216 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15217                 tp->rx_copy_thresh = ~(u16)0;
15218 #endif
15219         }
15220
15221         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15222         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15223         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15224
15225         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15226
15227         /* Increment the rx prod index on the rx std ring by at most
15228          * 8 for these chips to workaround hw errata.
15229          */
15230         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15231             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15232             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15233                 tp->rx_std_max_post = 8;
15234
15235         if (tg3_flag(tp, ASPM_WORKAROUND))
15236                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15237                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15238
15239         return err;
15240 }
15241
15242 #ifdef CONFIG_SPARC
15243 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15244 {
15245         struct net_device *dev = tp->dev;
15246         struct pci_dev *pdev = tp->pdev;
15247         struct device_node *dp = pci_device_to_OF_node(pdev);
15248         const unsigned char *addr;
15249         int len;
15250
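        /* Prefer the "local-mac-address" property that OpenFirmware
         * publishes for this PCI device, if one is present.
         */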
15251         addr = of_get_property(dp, "local-mac-address", &len);
15252         if (addr && len == 6) {
15253                 memcpy(dev->dev_addr, addr, 6);
15254                 memcpy(dev->perm_addr, dev->dev_addr, 6);
15255                 return 0;
15256         }
15257         return -ENODEV;
15258 }
15259
15260 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15261 {
15262         struct net_device *dev = tp->dev;
15263
15264         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15265         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15266         return 0;
15267 }
15268 #endif
15269
15270 static int __devinit tg3_get_device_address(struct tg3 *tp)
15271 {
15272         struct net_device *dev = tp->dev;
15273         u32 hi, lo, mac_offset;
15274         int addr_ok = 0;
15275
15276 #ifdef CONFIG_SPARC
15277         if (!tg3_get_macaddr_sparc(tp))
15278                 return 0;
15279 #endif
15280
15281         mac_offset = 0x7c;
15282         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15283             tg3_flag(tp, 5780_CLASS)) {
15284                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15285                         mac_offset = 0xcc;
15286                 if (tg3_nvram_lock(tp))
15287                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15288                 else
15289                         tg3_nvram_unlock(tp);
15290         } else if (tg3_flag(tp, 5717_PLUS)) {
15291                 if (tp->pci_fn & 1)
15292                         mac_offset = 0xcc;
15293                 if (tp->pci_fn > 1)
15294                         mac_offset += 0x18c;
15295         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15296                 mac_offset = 0x10;
15297
15298         /* First try to get it from MAC address mailbox. */
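        /* Bootcode stores the ASCII signature "HK" (0x484b) in the
         * upper half of the high mailbox word to mark a valid address.
         */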
15299         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15300         if ((hi >> 16) == 0x484b) {
15301                 dev->dev_addr[0] = (hi >>  8) & 0xff;
15302                 dev->dev_addr[1] = (hi >>  0) & 0xff;
15303
15304                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15305                 dev->dev_addr[2] = (lo >> 24) & 0xff;
15306                 dev->dev_addr[3] = (lo >> 16) & 0xff;
15307                 dev->dev_addr[4] = (lo >>  8) & 0xff;
15308                 dev->dev_addr[5] = (lo >>  0) & 0xff;
15309
15310                 /* Some old bootcode may report a 0 MAC address in SRAM */
15311                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15312         }
15313         if (!addr_ok) {
15314                 /* Next, try NVRAM. */
15315                 if (!tg3_flag(tp, NO_NVRAM) &&
15316                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15317                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15318                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15319                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15320                 }
15321                 /* Finally just fetch it out of the MAC control regs. */
15322                 else {
15323                         hi = tr32(MAC_ADDR_0_HIGH);
15324                         lo = tr32(MAC_ADDR_0_LOW);
15325
15326                         dev->dev_addr[5] = lo & 0xff;
15327                         dev->dev_addr[4] = (lo >> 8) & 0xff;
15328                         dev->dev_addr[3] = (lo >> 16) & 0xff;
15329                         dev->dev_addr[2] = (lo >> 24) & 0xff;
15330                         dev->dev_addr[1] = hi & 0xff;
15331                         dev->dev_addr[0] = (hi >> 8) & 0xff;
15332                 }
15333         }
15334
15335         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15336 #ifdef CONFIG_SPARC
15337                 if (!tg3_get_default_macaddr_sparc(tp))
15338                         return 0;
15339 #endif
15340                 return -EINVAL;
15341         }
15342         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15343         return 0;
15344 }
15345
15346 #define BOUNDARY_SINGLE_CACHELINE       1
15347 #define BOUNDARY_MULTI_CACHELINE        2
15348
15349 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15350 {
15351         int cacheline_size;
15352         u8 byte;
15353         int goal;
15354
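        /* The PCI cache line size register counts 32-bit words; a
         * value of zero means firmware never programmed it, so fall
         * back to 1024 bytes.
         */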
15355         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15356         if (byte == 0)
15357                 cacheline_size = 1024;
15358         else
15359                 cacheline_size = (int) byte * 4;
15360
15361         /* On 5703 and later chips, the boundary bits have no
15362          * effect.
15363          */
15364         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15365             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15366             !tg3_flag(tp, PCI_EXPRESS))
15367                 goto out;
15368
15369 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15370         goal = BOUNDARY_MULTI_CACHELINE;
15371 #else
15372 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15373         goal = BOUNDARY_SINGLE_CACHELINE;
15374 #else
15375         goal = 0;
15376 #endif
15377 #endif
15378
15379         if (tg3_flag(tp, 57765_PLUS)) {
15380                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15381                 goto out;
15382         }
15383
15384         if (!goal)
15385                 goto out;
15386
15387         /* PCI controllers on most RISC systems tend to disconnect
15388          * when a device tries to burst across a cache-line boundary.
15389          * Therefore, letting tg3 do so just wastes PCI bandwidth.
15390          *
15391          * Unfortunately, for PCI-E there are only limited
15392          * write-side controls for this, and thus for reads
15393          * we will still get the disconnects.  We'll also waste
15394          * these PCI cycles for both read and write for chips
15395          * other than 5700 and 5701, since only those two implement
15396          * the boundary bits.
15397          */
15398         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15399                 switch (cacheline_size) {
15400                 case 16:
15401                 case 32:
15402                 case 64:
15403                 case 128:
15404                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15405                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15406                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15407                         } else {
15408                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15409                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15410                         }
15411                         break;
15412
15413                 case 256:
15414                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15415                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15416                         break;
15417
15418                 default:
15419                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15420                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15421                         break;
15422                 }
15423         } else if (tg3_flag(tp, PCI_EXPRESS)) {
15424                 switch (cacheline_size) {
15425                 case 16:
15426                 case 32:
15427                 case 64:
15428                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15429                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15430                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15431                                 break;
15432                         }
15433                         /* fallthrough */
15434                 case 128:
15435                 default:
15436                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15437                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15438                         break;
15439                 }
15440         } else {
15441                 switch (cacheline_size) {
15442                 case 16:
15443                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15444                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15445                                         DMA_RWCTRL_WRITE_BNDRY_16);
15446                                 break;
15447                         }
15448                         /* fallthrough */
15449                 case 32:
15450                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15451                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15452                                         DMA_RWCTRL_WRITE_BNDRY_32);
15453                                 break;
15454                         }
15455                         /* fallthrough */
15456                 case 64:
15457                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15458                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15459                                         DMA_RWCTRL_WRITE_BNDRY_64);
15460                                 break;
15461                         }
15462                         /* fallthrough */
15463                 case 128:
15464                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15465                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15466                                         DMA_RWCTRL_WRITE_BNDRY_128);
15467                                 break;
15468                         }
15469                         /* fallthrough */
15470                 case 256:
15471                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15472                                 DMA_RWCTRL_WRITE_BNDRY_256);
15473                         break;
15474                 case 512:
15475                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15476                                 DMA_RWCTRL_WRITE_BNDRY_512);
15477                         break;
15478                 case 1024:
15479                 default:
15480                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15481                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15482                         break;
15483                 }
15484         }
15485
15486 out:
15487         return val;
15488 }
15489
15490 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15491 {
15492         struct tg3_internal_buffer_desc test_desc;
15493         u32 sram_dma_descs;
15494         int i, ret;
15495
15496         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15497
15498         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15499         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15500         tw32(RDMAC_STATUS, 0);
15501         tw32(WDMAC_STATUS, 0);
15502
15503         tw32(BUFMGR_MODE, 0);
15504         tw32(FTQ_RESET, 0);
15505
15506         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15507         test_desc.addr_lo = buf_dma & 0xffffffff;
15508         test_desc.nic_mbuf = 0x00002100;
15509         test_desc.len = size;
15510
15511         /*
15512          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15513          * the *second* time the tg3 driver was getting loaded after an
15514          * initial scan.
15515          *
15516          * Broadcom tells me:
15517          *   ...the DMA engine is connected to the GRC block and a DMA
15518          *   reset may affect the GRC block in some unpredictable way...
15519          *   The behavior of resets to individual blocks has not been tested.
15520          *
15521          * Broadcom noted the GRC reset will also reset all sub-components.
15522          */
15523         if (to_device) {
15524                 test_desc.cqid_sqid = (13 << 8) | 2;
15525
15526                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15527                 udelay(40);
15528         } else {
15529                 test_desc.cqid_sqid = (16 << 8) | 7;
15530
15531                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15532                 udelay(40);
15533         }
15534         test_desc.flags = 0x00000005;
15535
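        /* Copy the descriptor image into NIC SRAM one word at a time
         * through the PCI memory window, then hand its SRAM address to
         * the appropriate DMA FTQ below to start the transfer.
         */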
15536         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15537                 u32 val;
15538
15539                 val = *(((u32 *)&test_desc) + i);
15540                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15541                                        sram_dma_descs + (i * sizeof(u32)));
15542                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15543         }
15544         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15545
15546         if (to_device)
15547                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15548         else
15549                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15550
15551         ret = -ENODEV;
15552         for (i = 0; i < 40; i++) {
15553                 u32 val;
15554
15555                 if (to_device)
15556                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15557                 else
15558                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15559                 if ((val & 0xffff) == sram_dma_descs) {
15560                         ret = 0;
15561                         break;
15562                 }
15563
15564                 udelay(100);
15565         }
15566
15567         return ret;
15568 }
15569
15570 #define TEST_BUFFER_SIZE        0x2000
15571
15572 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15573         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15574         { },
15575 };
15576
15577 static int __devinit tg3_test_dma(struct tg3 *tp)
15578 {
15579         dma_addr_t buf_dma;
15580         u32 *buf, saved_dma_rwctrl;
15581         int ret = 0;
15582
15583         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15584                                  &buf_dma, GFP_KERNEL);
15585         if (!buf) {
15586                 ret = -ENOMEM;
15587                 goto out_nofree;
15588         }
15589
15590         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15591                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15592
15593         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15594
15595         if (tg3_flag(tp, 57765_PLUS))
15596                 goto out;
15597
15598         if (tg3_flag(tp, PCI_EXPRESS)) {
15599                 /* DMA read watermark not used on PCIE */
15600                 tp->dma_rwctrl |= 0x00180000;
15601         } else if (!tg3_flag(tp, PCIX_MODE)) {
15602                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15603                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15604                         tp->dma_rwctrl |= 0x003f0000;
15605                 else
15606                         tp->dma_rwctrl |= 0x003f000f;
15607         } else {
15608                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15609                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15610                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15611                         u32 read_water = 0x7;
15612
15613                         /* If the 5704 is behind the EPB bridge, we can
15614                          * do the less restrictive ONE_DMA workaround for
15615                          * better performance.
15616                          */
15617                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15618                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15619                                 tp->dma_rwctrl |= 0x8000;
15620                         else if (ccval == 0x6 || ccval == 0x7)
15621                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15622
15623                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15624                                 read_water = 4;
15625                         /* Set bit 23 to enable PCIX hw bug fix */
15626                         tp->dma_rwctrl |=
15627                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15628                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15629                                 (1 << 23);
15630                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15631                         /* 5780 always in PCIX mode */
15632                         tp->dma_rwctrl |= 0x00144000;
15633                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15634                         /* 5714 always in PCIX mode */
15635                         tp->dma_rwctrl |= 0x00148000;
15636                 } else {
15637                         tp->dma_rwctrl |= 0x001b000f;
15638                 }
15639         }
15640
15641         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15642             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15643                 tp->dma_rwctrl &= 0xfffffff0;
15644
15645         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15646             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15647                 /* Remove this if it causes problems for some boards. */
15648                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15649
15650                 /* On 5700/5701 chips, we need to set this bit.
15651                  * Otherwise the chip will issue cacheline transactions
15652                  * to streamable DMA memory with not all the byte
15653                  * enables turned on.  This is an error on several
15654                  * RISC PCI controllers, in particular sparc64.
15655                  *
15656                  * On 5703/5704 chips, this bit has been reassigned
15657                  * a different meaning.  In particular, it is used
15658                  * on those chips to enable a PCI-X workaround.
15659                  */
15660                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15661         }
15662
15663         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15664
15665 #if 0
15666         /* Unneeded, already done by tg3_get_invariants.  */
15667         tg3_switch_clocks(tp);
15668 #endif
15669
15670         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15671             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15672                 goto out;
15673
15674         /* It is best to perform the DMA test with the maximum write
15675          * burst size to expose the 5700/5701 write DMA bug.
15676          */
15677         saved_dma_rwctrl = tp->dma_rwctrl;
15678         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15679         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15680
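        /* Pattern test: DMA the buffer to the chip and back, then
         * verify the contents.  On corruption, retry once with the
         * write boundary clamped to 16 bytes before giving up.
         */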
15681         while (1) {
15682                 u32 *p = buf, i;
15683
15684                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15685                         p[i] = i;
15686
15687                 /* Send the buffer to the chip. */
15688                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15689                 if (ret) {
15690                         dev_err(&tp->pdev->dev,
15691                                 "%s: Buffer write failed. err = %d\n",
15692                                 __func__, ret);
15693                         break;
15694                 }
15695
15696 #if 0
15697                 /* validate data reached card RAM correctly. */
15698                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15699                         u32 val;
15700                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15701                         if (le32_to_cpu(val) != p[i]) {
15702                                 dev_err(&tp->pdev->dev,
15703                                         "%s: Buffer corrupted on device! "
15704                                         "(%d != %d)\n", __func__, val, i);
15705                                 /* ret = -ENODEV here? */
15706                         }
15707                         p[i] = 0;
15708                 }
15709 #endif
15710                 /* Now read it back. */
15711                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15712                 if (ret) {
15713                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15714                                 "err = %d\n", __func__, ret);
15715                         break;
15716                 }
15717
15718                 /* Verify it. */
15719                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15720                         if (p[i] == i)
15721                                 continue;
15722
15723                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15724                             DMA_RWCTRL_WRITE_BNDRY_16) {
15725                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15726                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15727                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15728                                 break;
15729                         } else {
15730                                 dev_err(&tp->pdev->dev,
15731                                         "%s: Buffer corrupted on read back! "
15732                                         "(%d != %d)\n", __func__, p[i], i);
15733                                 ret = -ENODEV;
15734                                 goto out;
15735                         }
15736                 }
15737
15738                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15739                         /* Success. */
15740                         ret = 0;
15741                         break;
15742                 }
15743         }
15744         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15745             DMA_RWCTRL_WRITE_BNDRY_16) {
15746                 /* DMA test passed without adjusting the DMA boundary;
15747                  * now look for chipsets that are known to expose the
15748                  * DMA bug without failing the test.
15749                  */
15750                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15751                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15752                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15753                 } else {
15754                         /* Safe to use the calculated DMA boundary. */
15755                         tp->dma_rwctrl = saved_dma_rwctrl;
15756                 }
15757
15758                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15759         }
15760
15761 out:
15762         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15763 out_nofree:
15764         return ret;
15765 }
15766
15767 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15768 {
15769         if (tg3_flag(tp, 57765_PLUS)) {
15770                 tp->bufmgr_config.mbuf_read_dma_low_water =
15771                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15772                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15773                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15774                 tp->bufmgr_config.mbuf_high_water =
15775                         DEFAULT_MB_HIGH_WATER_57765;
15776
15777                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15778                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15779                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15780                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15781                 tp->bufmgr_config.mbuf_high_water_jumbo =
15782                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15783         } else if (tg3_flag(tp, 5705_PLUS)) {
15784                 tp->bufmgr_config.mbuf_read_dma_low_water =
15785                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15786                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15787                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15788                 tp->bufmgr_config.mbuf_high_water =
15789                         DEFAULT_MB_HIGH_WATER_5705;
15790                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15791                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15792                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15793                         tp->bufmgr_config.mbuf_high_water =
15794                                 DEFAULT_MB_HIGH_WATER_5906;
15795                 }
15796
15797                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15798                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15799                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15800                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15801                 tp->bufmgr_config.mbuf_high_water_jumbo =
15802                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15803         } else {
15804                 tp->bufmgr_config.mbuf_read_dma_low_water =
15805                         DEFAULT_MB_RDMA_LOW_WATER;
15806                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15807                         DEFAULT_MB_MACRX_LOW_WATER;
15808                 tp->bufmgr_config.mbuf_high_water =
15809                         DEFAULT_MB_HIGH_WATER;
15810
15811                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15812                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15813                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15814                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15815                 tp->bufmgr_config.mbuf_high_water_jumbo =
15816                         DEFAULT_MB_HIGH_WATER_JUMBO;
15817         }
15818
15819         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15820         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15821 }
15822
15823 static char * __devinit tg3_phy_string(struct tg3 *tp)
15824 {
15825         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15826         case TG3_PHY_ID_BCM5400:        return "5400";
15827         case TG3_PHY_ID_BCM5401:        return "5401";
15828         case TG3_PHY_ID_BCM5411:        return "5411";
15829         case TG3_PHY_ID_BCM5701:        return "5701";
15830         case TG3_PHY_ID_BCM5703:        return "5703";
15831         case TG3_PHY_ID_BCM5704:        return "5704";
15832         case TG3_PHY_ID_BCM5705:        return "5705";
15833         case TG3_PHY_ID_BCM5750:        return "5750";
15834         case TG3_PHY_ID_BCM5752:        return "5752";
15835         case TG3_PHY_ID_BCM5714:        return "5714";
15836         case TG3_PHY_ID_BCM5780:        return "5780";
15837         case TG3_PHY_ID_BCM5755:        return "5755";
15838         case TG3_PHY_ID_BCM5787:        return "5787";
15839         case TG3_PHY_ID_BCM5784:        return "5784";
15840         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15841         case TG3_PHY_ID_BCM5906:        return "5906";
15842         case TG3_PHY_ID_BCM5761:        return "5761";
15843         case TG3_PHY_ID_BCM5718C:       return "5718C";
15844         case TG3_PHY_ID_BCM5718S:       return "5718S";
15845         case TG3_PHY_ID_BCM57765:       return "57765";
15846         case TG3_PHY_ID_BCM5719C:       return "5719C";
15847         case TG3_PHY_ID_BCM5720C:       return "5720C";
15848         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15849         case 0:                 return "serdes";
15850         default:                return "unknown";
15851         }
15852 }
15853
15854 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15855 {
15856         if (tg3_flag(tp, PCI_EXPRESS)) {
15857                 strcpy(str, "PCI Express");
15858                 return str;
15859         } else if (tg3_flag(tp, PCIX_MODE)) {
15860                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15861
15862                 strcpy(str, "PCIX:");
15863
15864                 if ((clock_ctrl == 7) ||
15865                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15866                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15867                         strcat(str, "133MHz");
15868                 else if (clock_ctrl == 0)
15869                         strcat(str, "33MHz");
15870                 else if (clock_ctrl == 2)
15871                         strcat(str, "50MHz");
15872                 else if (clock_ctrl == 4)
15873                         strcat(str, "66MHz");
15874                 else if (clock_ctrl == 6)
15875                         strcat(str, "100MHz");
15876         } else {
15877                 strcpy(str, "PCI:");
15878                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15879                         strcat(str, "66MHz");
15880                 else
15881                         strcat(str, "33MHz");
15882         }
15883         if (tg3_flag(tp, PCI_32BIT))
15884                 strcat(str, ":32-bit");
15885         else
15886                 strcat(str, ":64-bit");
15887         return str;
15888 }
15889
15890 static void __devinit tg3_init_coal(struct tg3 *tp)
15891 {
15892         struct ethtool_coalesce *ec = &tp->coal;
15893
15894         memset(ec, 0, sizeof(*ec));
15895         ec->cmd = ETHTOOL_GCOALESCE;
15896         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15897         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15898         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15899         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15900         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15901         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15902         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15903         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15904         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15905
15906         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15907                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15908                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15909                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15910                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15911                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15912         }
15913
15914         if (tg3_flag(tp, 5705_PLUS)) {
15915                 ec->rx_coalesce_usecs_irq = 0;
15916                 ec->tx_coalesce_usecs_irq = 0;
15917                 ec->stats_block_coalesce_usecs = 0;
15918         }
15919 }
15920
15921 static int __devinit tg3_init_one(struct pci_dev *pdev,
15922                                   const struct pci_device_id *ent)
15923 {
15924         struct net_device *dev;
15925         struct tg3 *tp;
15926         int i, err, pm_cap;
15927         u32 sndmbx, rcvmbx, intmbx;
15928         char str[40];
15929         u64 dma_mask, persist_dma_mask;
15930         netdev_features_t features = 0;
15931
15932         printk_once(KERN_INFO "%s\n", version);
15933
15934         err = pci_enable_device(pdev);
15935         if (err) {
15936                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15937                 return err;
15938         }
15939
15940         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15941         if (err) {
15942                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15943                 goto err_out_disable_pdev;
15944         }
15945
15946         pci_set_master(pdev);
15947
15948         /* Find power-management capability. */
15949         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15950         if (pm_cap == 0) {
15951                 dev_err(&pdev->dev,
15952                         "Cannot find Power Management capability, aborting\n");
15953                 err = -EIO;
15954                 goto err_out_free_res;
15955         }
15956
15957         err = pci_set_power_state(pdev, PCI_D0);
15958         if (err) {
15959                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15960                 goto err_out_free_res;
15961         }
15962
15963         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15964         if (!dev) {
15965                 err = -ENOMEM;
15966                 goto err_out_power_down;
15967         }
15968
15969         SET_NETDEV_DEV(dev, &pdev->dev);
15970
15971         tp = netdev_priv(dev);
15972         tp->pdev = pdev;
15973         tp->dev = dev;
15974         tp->pm_cap = pm_cap;
15975         tp->rx_mode = TG3_DEF_RX_MODE;
15976         tp->tx_mode = TG3_DEF_TX_MODE;
15977
15978         if (tg3_debug > 0)
15979                 tp->msg_enable = tg3_debug;
15980         else
15981                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15982
15983         /* The word/byte swap controls here control register access byte
15984          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15985          * setting below.
15986          */
15987         tp->misc_host_ctrl =
15988                 MISC_HOST_CTRL_MASK_PCI_INT |
15989                 MISC_HOST_CTRL_WORD_SWAP |
15990                 MISC_HOST_CTRL_INDIR_ACCESS |
15991                 MISC_HOST_CTRL_PCISTATE_RW;
15992
15993         /* The NONFRM (non-frame) byte/word swap controls take effect
15994          * on descriptor entries, anything which isn't packet data.
15995          *
15996          * The StrongARM chips on the board (one for tx, one for rx)
15997          * are running in big-endian mode.
15998          */
15999         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16000                         GRC_MODE_WSWAP_NONFRM_DATA);
16001 #ifdef __BIG_ENDIAN
16002         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16003 #endif
16004         spin_lock_init(&tp->lock);
16005         spin_lock_init(&tp->indirect_lock);
16006         INIT_WORK(&tp->reset_task, tg3_reset_task);
16007
16008         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16009         if (!tp->regs) {
16010                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16011                 err = -ENOMEM;
16012                 goto err_out_free_dev;
16013         }
16014
16015         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16016             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16017             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16018             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16019             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16020             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16021             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16022             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16023             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
16024                 tg3_flag_set(tp, ENABLE_APE);
16025                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16026                 if (!tp->aperegs) {
16027                         dev_err(&pdev->dev,
16028                                 "Cannot map APE registers, aborting\n");
16029                         err = -ENOMEM;
16030                         goto err_out_iounmap;
16031                 }
16032         }
16033
16034         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16035         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16036
16037         dev->ethtool_ops = &tg3_ethtool_ops;
16038         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16039         dev->netdev_ops = &tg3_netdev_ops;
16040         dev->irq = pdev->irq;
16041
16042         err = tg3_get_invariants(tp);
16043         if (err) {
16044                 dev_err(&pdev->dev,
16045                         "Problem fetching invariants of chip, aborting\n");
16046                 goto err_out_apeunmap;
16047         }
16048
16049         /* The EPB bridge inside the 5714, 5715, and 5780, and any
16050          * device behind the EPB, cannot support DMA addresses > 40-bit.
16051          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16052          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16053          * do DMA address check in tg3_start_xmit().
16054          */
16055         if (tg3_flag(tp, IS_5788))
16056                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16057         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16058                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16059 #ifdef CONFIG_HIGHMEM
16060                 dma_mask = DMA_BIT_MASK(64);
16061 #endif
16062         } else
16063                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16064
16065         /* Configure DMA attributes. */
16066         if (dma_mask > DMA_BIT_MASK(32)) {
16067                 err = pci_set_dma_mask(pdev, dma_mask);
16068                 if (!err) {
16069                         features |= NETIF_F_HIGHDMA;
16070                         err = pci_set_consistent_dma_mask(pdev,
16071                                                           persist_dma_mask);
16072                         if (err < 0) {
16073                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16074                                         "DMA for consistent allocations\n");
16075                                 goto err_out_apeunmap;
16076                         }
16077                 }
16078         }
16079         if (err || dma_mask == DMA_BIT_MASK(32)) {
16080                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16081                 if (err) {
16082                         dev_err(&pdev->dev,
16083                                 "No usable DMA configuration, aborting\n");
16084                         goto err_out_apeunmap;
16085                 }
16086         }
16087
16088         tg3_init_bufmgr_config(tp);
16089
16090         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16091
16092         /* 5700 B0 chips do not support checksumming correctly due
16093          * to hardware bugs.
16094          */
16095         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16096                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16097
16098                 if (tg3_flag(tp, 5755_PLUS))
16099                         features |= NETIF_F_IPV6_CSUM;
16100         }
16101
16102         /* TSO is on by default on chips that support hardware TSO.
16103          * Firmware TSO on older chips gives lower performance, so it
16104          * is off by default, but can be enabled using ethtool.
16105          */
16106         if ((tg3_flag(tp, HW_TSO_1) ||
16107              tg3_flag(tp, HW_TSO_2) ||
16108              tg3_flag(tp, HW_TSO_3)) &&
16109             (features & NETIF_F_IP_CSUM))
16110                 features |= NETIF_F_TSO;
16111         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16112                 if (features & NETIF_F_IPV6_CSUM)
16113                         features |= NETIF_F_TSO6;
16114                 if (tg3_flag(tp, HW_TSO_3) ||
16115                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16116                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16117                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16118                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16119                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16120                         features |= NETIF_F_TSO_ECN;
16121         }
16122
16123         dev->features |= features;
16124         dev->vlan_features |= features;
16125
16126         /*
16127          * Add loopback capability only for a subset of devices that support
16128          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16129          * loopback for the remaining devices.
16130          */
16131         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16132             !tg3_flag(tp, CPMU_PRESENT))
16133                 /* Add the loopback capability */
16134                 features |= NETIF_F_LOOPBACK;
16135
16136         dev->hw_features |= features;
16137
16138         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16139             !tg3_flag(tp, TSO_CAPABLE) &&
16140             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16141                 tg3_flag_set(tp, MAX_RXPEND_64);
16142                 tp->rx_pending = 63;
16143         }
16144
16145         err = tg3_get_device_address(tp);
16146         if (err) {
16147                 dev_err(&pdev->dev,
16148                         "Could not obtain valid ethernet address, aborting\n");
16149                 goto err_out_apeunmap;
16150         }
16151
16152         /*
16153          * Reset chip in case UNDI or EFI driver did not shut down
16154          * cleanly.  The DMA self test will enable WDMAC and we'll see (spurious)
16155          * pending DMA on the PCI bus at that point.
16156          */
16157         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16158             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16159                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16160                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16161         }
16162
16163         err = tg3_test_dma(tp);
16164         if (err) {
16165                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16166                 goto err_out_apeunmap;
16167         }
16168
16169         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16170         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16171         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16172         for (i = 0; i < tp->irq_max; i++) {
16173                 struct tg3_napi *tnapi = &tp->napi[i];
16174
16175                 tnapi->tp = tp;
16176                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16177
16178                 tnapi->int_mbox = intmbx;
16179                 if (i <= 4)
16180                         intmbx += 0x8;
16181                 else
16182                         intmbx += 0x4;
16183
16184                 tnapi->consmbox = rcvmbx;
16185                 tnapi->prodmbox = sndmbx;
16186
16187                 if (i)
16188                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16189                 else
16190                         tnapi->coal_now = HOSTCC_MODE_NOW;
16191
16192                 if (!tg3_flag(tp, SUPPORT_MSIX))
16193                         break;
16194
16195                 /*
16196                  * If we support MSIX, we'll be using RSS.  If we're using
16197                  * RSS, the first vector only handles link interrupts and the
16198                  * remaining vectors handle rx and tx interrupts.  Reuse the
16199          * mailbox values for the next iteration.  The values we set up
16200                  * above are still useful for the single vectored mode.
16201                  */
16202                 if (!i)
16203                         continue;
16204
16205                 rcvmbx += 0x8;
16206
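                /* The send producer mailboxes are not contiguous in
                 * register space, hence the alternating -0x4/+0xc
                 * adjustment below.
                 */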
16207                 if (sndmbx & 0x4)
16208                         sndmbx -= 0x4;
16209                 else
16210                         sndmbx += 0xc;
16211         }
16212
16213         tg3_init_coal(tp);
16214
16215         pci_set_drvdata(pdev, dev);
16216
16217         if (tg3_flag(tp, 5717_PLUS)) {
16218                 /* Resume a low-power mode */
16219                 tg3_frob_aux_power(tp, false);
16220         }
16221
16222         tg3_timer_init(tp);
16223
16224         err = register_netdev(dev);
16225         if (err) {
16226                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16227                 goto err_out_apeunmap;
16228         }
16229
16230         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16231                     tp->board_part_number,
16232                     tp->pci_chip_rev_id,
16233                     tg3_bus_string(tp, str),
16234                     dev->dev_addr);
16235
16236         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16237                 struct phy_device *phydev;
16238                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16239                 netdev_info(dev,
16240                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16241                             phydev->drv->name, dev_name(&phydev->dev));
16242         } else {
16243                 char *ethtype;
16244
16245                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16246                         ethtype = "10/100Base-TX";
16247                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16248                         ethtype = "1000Base-SX";
16249                 else
16250                         ethtype = "10/100/1000Base-T";
16251
16252                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16253                             "(WireSpeed[%d], EEE[%d])\n",
16254                             tg3_phy_string(tp), ethtype,
16255                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16256                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16257         }
16258
16259         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16260                     (dev->features & NETIF_F_RXCSUM) != 0,
16261                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
16262                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16263                     tg3_flag(tp, ENABLE_ASF) != 0,
16264                     tg3_flag(tp, TSO_CAPABLE) != 0);
16265         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16266                     tp->dma_rwctrl,
16267                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16268                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16269
16270         pci_save_state(pdev);
16271
16272         return 0;
16273
16274 err_out_apeunmap:
16275         if (tp->aperegs) {
16276                 iounmap(tp->aperegs);
16277                 tp->aperegs = NULL;
16278         }
16279
16280 err_out_iounmap:
16281         if (tp->regs) {
16282                 iounmap(tp->regs);
16283                 tp->regs = NULL;
16284         }
16285
16286 err_out_free_dev:
16287         free_netdev(dev);
16288
16289 err_out_power_down:
16290         pci_set_power_state(pdev, PCI_D3hot);
16291
16292 err_out_free_res:
16293         pci_release_regions(pdev);
16294
16295 err_out_disable_pdev:
16296         pci_disable_device(pdev);
16297         pci_set_drvdata(pdev, NULL);
16298         return err;
16299 }
16300
16301 static void __devexit tg3_remove_one(struct pci_dev *pdev)
16302 {
16303         struct net_device *dev = pci_get_drvdata(pdev);
16304
16305         if (dev) {
16306                 struct tg3 *tp = netdev_priv(dev);
16307
16308                 release_firmware(tp->fw);
16309
16310                 tg3_reset_task_cancel(tp);
16311
16312                 if (tg3_flag(tp, USE_PHYLIB)) {
16313                         tg3_phy_fini(tp);
16314                         tg3_mdio_fini(tp);
16315                 }
16316
16317                 unregister_netdev(dev);
16318                 if (tp->aperegs) {
16319                         iounmap(tp->aperegs);
16320                         tp->aperegs = NULL;
16321                 }
16322                 if (tp->regs) {
16323                         iounmap(tp->regs);
16324                         tp->regs = NULL;
16325                 }
16326                 free_netdev(dev);
16327                 pci_release_regions(pdev);
16328                 pci_disable_device(pdev);
16329                 pci_set_drvdata(pdev, NULL);
16330         }
16331 }
16332
16333 #ifdef CONFIG_PM_SLEEP
16334 static int tg3_suspend(struct device *device)
16335 {
16336         struct pci_dev *pdev = to_pci_dev(device);
16337         struct net_device *dev = pci_get_drvdata(pdev);
16338         struct tg3 *tp = netdev_priv(dev);
16339         int err;
16340
16341         if (!netif_running(dev))
16342                 return 0;
16343
16344         tg3_reset_task_cancel(tp);
16345         tg3_phy_stop(tp);
16346         tg3_netif_stop(tp);
16347
16348         tg3_timer_stop(tp);
16349
16350         tg3_full_lock(tp, 1);
16351         tg3_disable_ints(tp);
16352         tg3_full_unlock(tp);
16353
16354         netif_device_detach(dev);
16355
16356         tg3_full_lock(tp, 0);
16357         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16358         tg3_flag_clear(tp, INIT_COMPLETE);
16359         tg3_full_unlock(tp);
16360
16361         err = tg3_power_down_prepare(tp);
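        /* If the low-power transition cannot be prepared, restart the
         * hardware and return to a fully running state instead of
         * leaving the device half stopped.
         */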
16362         if (err) {
16363                 int err2;
16364
16365                 tg3_full_lock(tp, 0);
16366
16367                 tg3_flag_set(tp, INIT_COMPLETE);
16368                 err2 = tg3_restart_hw(tp, 1);
16369                 if (err2)
16370                         goto out;
16371
16372                 tg3_timer_start(tp);
16373
16374                 netif_device_attach(dev);
16375                 tg3_netif_start(tp);
16376
16377 out:
16378                 tg3_full_unlock(tp);
16379
16380                 if (!err2)
16381                         tg3_phy_start(tp);
16382         }
16383
16384         return err;
16385 }
16386
16387 static int tg3_resume(struct device *device)
16388 {
16389         struct pci_dev *pdev = to_pci_dev(device);
16390         struct net_device *dev = pci_get_drvdata(pdev);
16391         struct tg3 *tp = netdev_priv(dev);
16392         int err;
16393
16394         if (!netif_running(dev))
16395                 return 0;
16396
16397         netif_device_attach(dev);
16398
16399         tg3_full_lock(tp, 0);
16400
16401         tg3_flag_set(tp, INIT_COMPLETE);
16402         err = tg3_restart_hw(tp, 1);
16403         if (err)
16404                 goto out;
16405
16406         tg3_timer_start(tp);
16407
16408         tg3_netif_start(tp);
16409
16410 out:
16411         tg3_full_unlock(tp);
16412
16413         if (!err)
16414                 tg3_phy_start(tp);
16415
16416         return err;
16417 }
16418
16419 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16420 #define TG3_PM_OPS (&tg3_pm_ops)
16421
16422 #else
16423
16424 #define TG3_PM_OPS NULL
16425
16426 #endif /* CONFIG_PM_SLEEP */
16427
16428 /**
16429  * tg3_io_error_detected - called when PCI error is detected
16430  * @pdev: Pointer to PCI device
16431  * @state: The current pci connection state
16432  *
16433  * This function is called after a PCI bus error affecting
16434  * this device has been detected.
16435  */
16436 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16437                                               pci_channel_state_t state)
16438 {
16439         struct net_device *netdev = pci_get_drvdata(pdev);
16440         struct tg3 *tp = netdev_priv(netdev);
16441         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16442
16443         netdev_info(netdev, "PCI I/O error detected\n");
16444
16445         rtnl_lock();
16446
16447         if (!netif_running(netdev))
16448                 goto done;
16449
16450         tg3_phy_stop(tp);
16451
16452         tg3_netif_stop(tp);
16453
16454         tg3_timer_stop(tp);
16455
16456         /* Make sure the deferred reset task cannot run during teardown */
16457         tg3_reset_task_cancel(tp);
16458
16459         netif_device_detach(netdev);
16460
16461         /* Clean up software state, even if MMIO is blocked */
16462         tg3_full_lock(tp, 0);
16463         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16464         tg3_full_unlock(tp);
16465
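        /*
         * A permanent failure cannot be recovered from, so tell the
         * core to disconnect; otherwise disable the device and request
         * a slot reset by returning PCI_ERS_RESULT_NEED_RESET.
         */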
16466 done:
16467         if (state == pci_channel_io_perm_failure)
16468                 err = PCI_ERS_RESULT_DISCONNECT;
16469         else
16470                 pci_disable_device(pdev);
16471
16472         rtnl_unlock();
16473
16474         return err;
16475 }
16476
16477 /**
16478  * tg3_io_slot_reset - called after the PCI bus has been reset.
16479  * @pdev: Pointer to PCI device
16480  *
16481  * Restart the card from scratch, as if from a cold boot.
16482  * At this point, the card has experienced a hard reset,
16483  * followed by fixups by the BIOS, and has its config space
16484  * set up identically to what it was at cold boot.
16485  */
16486 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16487 {
16488         struct net_device *netdev = pci_get_drvdata(pdev);
16489         struct tg3 *tp = netdev_priv(netdev);
16490         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16491         int err;
16492
16493         rtnl_lock();
16494
16495         if (pci_enable_device(pdev)) {
16496                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16497                 goto done;
16498         }
16499
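        /*
         * Restore the config space saved at probe time, then save it
         * again immediately so that a later restore (e.g. after another
         * slot reset) starts from the same known-good contents.
         */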
16500         pci_set_master(pdev);
16501         pci_restore_state(pdev);
16502         pci_save_state(pdev);
16503
16504         if (!netif_running(netdev)) {
16505                 rc = PCI_ERS_RESULT_RECOVERED;
16506                 goto done;
16507         }
16508
16509         err = tg3_power_up(tp);
16510         if (err)
16511                 goto done;
16512
16513         rc = PCI_ERS_RESULT_RECOVERED;
16514
16515 done:
16516         rtnl_unlock();
16517
16518         return rc;
16519 }
16520
16521 /**
16522  * tg3_io_resume - called when traffic can start flowing again.
16523  * @pdev: Pointer to PCI device
16524  *
16525  * This callback is called when the error recovery driver tells
16526  * us that it's OK to resume normal operation.
16527  */
16528 static void tg3_io_resume(struct pci_dev *pdev)
16529 {
16530         struct net_device *netdev = pci_get_drvdata(pdev);
16531         struct tg3 *tp = netdev_priv(netdev);
16532         int err;
16533
16534         rtnl_lock();
16535
16536         if (!netif_running(netdev))
16537                 goto done;
16538
16539         tg3_full_lock(tp, 0);
16540         tg3_flag_set(tp, INIT_COMPLETE);
16541         err = tg3_restart_hw(tp, 1);
16542         tg3_full_unlock(tp);
16543         if (err) {
16544                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16545                 goto done;
16546         }
16547
16548         netif_device_attach(netdev);
16549
16550         tg3_timer_start(tp);
16551
16552         tg3_netif_start(tp);
16553
16554         tg3_phy_start(tp);
16555
16556 done:
16557         rtnl_unlock();
16558 }
16559
16560 static const struct pci_error_handlers tg3_err_handler = {
16561         .error_detected = tg3_io_error_detected,
16562         .slot_reset     = tg3_io_slot_reset,
16563         .resume         = tg3_io_resume
16564 };
16565
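/*
 * __devexit_p() resolves to tg3_remove_one() when hotplug removal is
 * configured in and to NULL otherwise, letting the exit path be
 * discarded; .driver.pm is NULL when CONFIG_PM_SLEEP is disabled.
 */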
16566 static struct pci_driver tg3_driver = {
16567         .name           = DRV_MODULE_NAME,
16568         .id_table       = tg3_pci_tbl,
16569         .probe          = tg3_init_one,
16570         .remove         = __devexit_p(tg3_remove_one),
16571         .err_handler    = &tg3_err_handler,
16572         .driver.pm      = TG3_PM_OPS,
16573 };
16574
16575 static int __init tg3_init(void)
16576 {
16577         return pci_register_driver(&tg3_driver);
16578 }
16579
16580 static void __exit tg3_cleanup(void)
16581 {
16582         pci_unregister_driver(&tg3_driver);
16583 }
16584
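/* Module entry points: register and unregister the PCI driver. */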
16585 module_init(tg3_init);
16586 module_exit(tg3_cleanup);