1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2012 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #if IS_ENABLED(CONFIG_HWMON)
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50 #endif
51
52 #include <net/checksum.h>
53 #include <net/ip.h>
54
55 #include <linux/io.h>
56 #include <asm/byteorder.h>
57 #include <linux/uaccess.h>
58
59 #ifdef CONFIG_SPARC
60 #include <asm/idprom.h>
61 #include <asm/prom.h>
62 #endif
63
64 #define BAR_0   0
65 #define BAR_2   2
66
67 #include "tg3.h"
68
69 /* Functions & macros to access TG3_FLAGS bits with enum type checking */
70
71 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
72 {
73         return test_bit(flag, bits);
74 }
75
76 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
77 {
78         set_bit(flag, bits);
79 }
80
81 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
82 {
83         clear_bit(flag, bits);
84 }
85
86 #define tg3_flag(tp, flag)                              \
87         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
88 #define tg3_flag_set(tp, flag)                          \
89         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_clear(tp, flag)                        \
91         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
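
/* Usage sketch (illustrative only, not part of the original source): the
 * wrappers above give type-checked access to the tp->tg3_flags bitmap, e.g.
 *
 *      if (tg3_flag(tp, ENABLE_APE))
 *              tg3_flag_set(tp, MDIOBUS_INITED);
 *
 * A name that is not a TG3_FLAG_* enumerator fails to compile, which a
 * raw test_bit()/set_bit() call would not catch.
 */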
92
93 #define DRV_MODULE_NAME         "tg3"
94 #define TG3_MAJ_NUM                     3
95 #define TG3_MIN_NUM                     123
96 #define DRV_MODULE_VERSION      \
97         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
98 #define DRV_MODULE_RELDATE      "March 21, 2012"
99
100 #define RESET_KIND_SHUTDOWN     0
101 #define RESET_KIND_INIT         1
102 #define RESET_KIND_SUSPEND      2
103
104 #define TG3_DEF_RX_MODE         0
105 #define TG3_DEF_TX_MODE         0
106 #define TG3_DEF_MSG_ENABLE        \
107         (NETIF_MSG_DRV          | \
108          NETIF_MSG_PROBE        | \
109          NETIF_MSG_LINK         | \
110          NETIF_MSG_TIMER        | \
111          NETIF_MSG_IFDOWN       | \
112          NETIF_MSG_IFUP         | \
113          NETIF_MSG_RX_ERR       | \
114          NETIF_MSG_TX_ERR)
115
116 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
117
118 /* length of time before we decide the hardware is borked,
119  * and dev->tx_timeout() should be called to fix the problem
120  */
121
122 #define TG3_TX_TIMEOUT                  (5 * HZ)
123
124 /* hardware minimum and maximum for a single frame's data payload */
125 #define TG3_MIN_MTU                     60
126 #define TG3_MAX_MTU(tp) \
127         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
128
129 /* These numbers seem to be hard coded in the NIC firmware somehow.
130  * You can't change the ring sizes, but you can change where you place
131  * them in the NIC onboard memory.
132  */
133 #define TG3_RX_STD_RING_SIZE(tp) \
134         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
135          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
136 #define TG3_DEF_RX_RING_PENDING         200
137 #define TG3_RX_JMB_RING_SIZE(tp) \
138         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
139          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
140 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
141
142 /* Do not place this n-ring entries value into the tp struct itself,
143  * we really want to expose these constants to GCC so that modulo et
144  * al.  operations are done with shifts and masks instead of with
145  * hw multiply/modulo instructions.  Another solution would be to
146  * replace things like '% foo' with '& (foo - 1)'.
147  */
148
149 #define TG3_TX_RING_SIZE                512
150 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
151
152 #define TG3_RX_STD_RING_BYTES(tp) \
153         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
154 #define TG3_RX_JMB_RING_BYTES(tp) \
155         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
156 #define TG3_RX_RCB_RING_BYTES(tp) \
157         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
158 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
159                                  TG3_TX_RING_SIZE)
160 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
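/* Example: TG3_TX_RING_SIZE is a power of two, so the mask above behaves
 * like a modulo without a hardware divide: NEXT_TX(510) == 511 and
 * NEXT_TX(511) == (512 & 511) == 0, wrapping back to the head of the
 * ring.  This is the '& (foo - 1)' trick mentioned in the comment above.
 */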
161
162 #define TG3_DMA_BYTE_ENAB               64
163
164 #define TG3_RX_STD_DMA_SZ               1536
165 #define TG3_RX_JMB_DMA_SZ               9046
166
167 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
168
169 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
170 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
171
172 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
173         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
174
175 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
176         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
177
178 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
179  * that are at least dword aligned when used in PCIX mode.  The driver
180  * works around this bug by double copying the packet.  This workaround
181  * is built into the normal double copy length check for efficiency.
182  *
183  * However, the double copy is only necessary on those architectures
184  * where unaligned memory accesses are inefficient.  For those architectures
185  * where unaligned memory accesses incur little penalty, we can reintegrate
186  * the 5701 in the normal rx path.  Doing so saves a device structure
187  * dereference by hardcoding the double copy threshold in place.
188  */
189 #define TG3_RX_COPY_THRESHOLD           256
190 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
191         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
192 #else
193         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
194 #endif
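
/* A minimal sketch (assumed shape, not code from this driver) of how the
 * receive path consumes TG3_RX_COPY_THRESH(): frames at or below the
 * threshold are copied into a freshly allocated skb so the DMA buffer can
 * be recycled in place; larger frames are unmapped and handed up as-is.
 *
 *      if (len > TG3_RX_COPY_THRESH(tp))
 *              ...unmap the buffer and hand it up as-is...
 *      else
 *              ...memcpy() into a small skb, recycle the buffer...
 */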
195
196 #if (NET_IP_ALIGN != 0)
197 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
198 #else
199 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
200 #endif
201
202 /* minimum number of free TX descriptors required to wake up TX process */
203 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
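/* e.g. with the default tx_pending of TG3_DEF_TX_RING_PENDING (511), the
 * queue is not woken until at least 511 / 4 = 127 descriptors are free.
 */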
204 #define TG3_TX_BD_DMA_MAX_2K            2048
205 #define TG3_TX_BD_DMA_MAX_4K            4096
206
207 #define TG3_RAW_IP_ALIGN 2
208
209 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
210 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
211
212 #define FIRMWARE_TG3            "tigon/tg3.bin"
213 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
214 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
215
216 static char version[] __devinitdata =
217         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
218
219 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
220 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
221 MODULE_LICENSE("GPL");
222 MODULE_VERSION(DRV_MODULE_VERSION);
223 MODULE_FIRMWARE(FIRMWARE_TG3);
224 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
225 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
226
227 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
228 module_param(tg3_debug, int, 0);
229 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
230
231 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
232         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
233         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
234         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
235         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
286         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
289         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
292         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
306         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
307         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
308         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
309         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
310         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
311         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
312         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
313         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
314         {}
315 };
316
317 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
318
319 static const struct {
320         const char string[ETH_GSTRING_LEN];
321 } ethtool_stats_keys[] = {
322         { "rx_octets" },
323         { "rx_fragments" },
324         { "rx_ucast_packets" },
325         { "rx_mcast_packets" },
326         { "rx_bcast_packets" },
327         { "rx_fcs_errors" },
328         { "rx_align_errors" },
329         { "rx_xon_pause_rcvd" },
330         { "rx_xoff_pause_rcvd" },
331         { "rx_mac_ctrl_rcvd" },
332         { "rx_xoff_entered" },
333         { "rx_frame_too_long_errors" },
334         { "rx_jabbers" },
335         { "rx_undersize_packets" },
336         { "rx_in_length_errors" },
337         { "rx_out_length_errors" },
338         { "rx_64_or_less_octet_packets" },
339         { "rx_65_to_127_octet_packets" },
340         { "rx_128_to_255_octet_packets" },
341         { "rx_256_to_511_octet_packets" },
342         { "rx_512_to_1023_octet_packets" },
343         { "rx_1024_to_1522_octet_packets" },
344         { "rx_1523_to_2047_octet_packets" },
345         { "rx_2048_to_4095_octet_packets" },
346         { "rx_4096_to_8191_octet_packets" },
347         { "rx_8192_to_9022_octet_packets" },
348
349         { "tx_octets" },
350         { "tx_collisions" },
351
352         { "tx_xon_sent" },
353         { "tx_xoff_sent" },
354         { "tx_flow_control" },
355         { "tx_mac_errors" },
356         { "tx_single_collisions" },
357         { "tx_mult_collisions" },
358         { "tx_deferred" },
359         { "tx_excessive_collisions" },
360         { "tx_late_collisions" },
361         { "tx_collide_2times" },
362         { "tx_collide_3times" },
363         { "tx_collide_4times" },
364         { "tx_collide_5times" },
365         { "tx_collide_6times" },
366         { "tx_collide_7times" },
367         { "tx_collide_8times" },
368         { "tx_collide_9times" },
369         { "tx_collide_10times" },
370         { "tx_collide_11times" },
371         { "tx_collide_12times" },
372         { "tx_collide_13times" },
373         { "tx_collide_14times" },
374         { "tx_collide_15times" },
375         { "tx_ucast_packets" },
376         { "tx_mcast_packets" },
377         { "tx_bcast_packets" },
378         { "tx_carrier_sense_errors" },
379         { "tx_discards" },
380         { "tx_errors" },
381
382         { "dma_writeq_full" },
383         { "dma_write_prioq_full" },
384         { "rxbds_empty" },
385         { "rx_discards" },
386         { "rx_errors" },
387         { "rx_threshold_hit" },
388
389         { "dma_readq_full" },
390         { "dma_read_prioq_full" },
391         { "tx_comp_queue_full" },
392
393         { "ring_set_send_prod_index" },
394         { "ring_status_update" },
395         { "nic_irqs" },
396         { "nic_avoided_irqs" },
397         { "nic_tx_threshold_hit" },
398
399         { "mbuf_lwm_thresh_hit" },
400 };
401
402 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
403
405 static const struct {
406         const char string[ETH_GSTRING_LEN];
407 } ethtool_test_keys[] = {
408         { "nvram test        (online) " },
409         { "link test         (online) " },
410         { "register test     (offline)" },
411         { "memory test       (offline)" },
412         { "mac loopback test (offline)" },
413         { "phy loopback test (offline)" },
414         { "ext loopback test (offline)" },
415         { "interrupt test    (offline)" },
416 };
417
418 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
419
421 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
422 {
423         writel(val, tp->regs + off);
424 }
425
426 static u32 tg3_read32(struct tg3 *tp, u32 off)
427 {
428         return readl(tp->regs + off);
429 }
430
431 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
432 {
433         writel(val, tp->aperegs + off);
434 }
435
436 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
437 {
438         return readl(tp->aperegs + off);
439 }
440
441 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
442 {
443         unsigned long flags;
444
445         spin_lock_irqsave(&tp->indirect_lock, flags);
446         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
447         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
448         spin_unlock_irqrestore(&tp->indirect_lock, flags);
449 }
450
451 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
452 {
453         writel(val, tp->regs + off);
454         readl(tp->regs + off);
455 }
456
457 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
458 {
459         unsigned long flags;
460         u32 val;
461
462         spin_lock_irqsave(&tp->indirect_lock, flags);
463         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
464         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
465         spin_unlock_irqrestore(&tp->indirect_lock, flags);
466         return val;
467 }
468
469 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
470 {
471         unsigned long flags;
472
473         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
474                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
475                                        TG3_64BIT_REG_LOW, val);
476                 return;
477         }
478         if (off == TG3_RX_STD_PROD_IDX_REG) {
479                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
480                                        TG3_64BIT_REG_LOW, val);
481                 return;
482         }
483
484         spin_lock_irqsave(&tp->indirect_lock, flags);
485         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
486         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
487         spin_unlock_irqrestore(&tp->indirect_lock, flags);
488
489         /* In indirect mode when disabling interrupts, we also need
490          * to clear the interrupt bit in the GRC local ctrl register.
491          */
492         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
493             (val == 0x1)) {
494                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
495                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
496         }
497 }
498
499 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
500 {
501         unsigned long flags;
502         u32 val;
503
504         spin_lock_irqsave(&tp->indirect_lock, flags);
505         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
506         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507         spin_unlock_irqrestore(&tp->indirect_lock, flags);
508         return val;
509 }
510
511 /* usec_wait specifies the wait time in usec when writing to certain registers
512  * where it is unsafe to read back the register without some delay.
513  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
514  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
515  */
516 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
517 {
518         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
519                 /* Non-posted methods */
520                 tp->write32(tp, off, val);
521         else {
522                 /* Posted method */
523                 tg3_write32(tp, off, val);
524                 if (usec_wait)
525                         udelay(usec_wait);
526                 tp->read32(tp, off);
527         }
528         /* Wait again after the read for the posted method to guarantee that
529          * the wait time is met.
530          */
531         if (usec_wait)
532                 udelay(usec_wait);
533 }
534
535 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
536 {
537         tp->write32_mbox(tp, off, val);
538         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
539                 tp->read32_mbox(tp, off);
540 }
541
542 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
543 {
544         void __iomem *mbox = tp->regs + off;
545         writel(val, mbox);
546         if (tg3_flag(tp, TXD_MBOX_HWBUG))
547                 writel(val, mbox);
548         if (tg3_flag(tp, MBOX_WRITE_REORDER))
549                 readl(mbox);
550 }
551
552 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
553 {
554         return readl(tp->regs + off + GRCMBOX_BASE);
555 }
556
557 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
558 {
559         writel(val, tp->regs + off + GRCMBOX_BASE);
560 }
561
562 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
563 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
564 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
565 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
566 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
567
568 #define tw32(reg, val)                  tp->write32(tp, reg, val)
569 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
570 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
571 #define tr32(reg)                       tp->read32(tp, reg)
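
/* Illustrative use of the flushed-write macros (a sketch; values as
 * defined earlier in this file): toggling a GPIO power switch must honor
 * the settle time, e.g.
 *
 *      tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * which writes the register and guarantees at least a 100 usec delay
 * whether the posted or the non-posted path was taken.
 */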
572
573 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
574 {
575         unsigned long flags;
576
577         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
578             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
579                 return;
580
581         spin_lock_irqsave(&tp->indirect_lock, flags);
582         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
583                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
584                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
585
586                 /* Always leave this as zero. */
587                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
588         } else {
589                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
590                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
591
592                 /* Always leave this as zero. */
593                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
594         }
595         spin_unlock_irqrestore(&tp->indirect_lock, flags);
596 }
597
598 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
599 {
600         unsigned long flags;
601
602         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
603             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
604                 *val = 0;
605                 return;
606         }
607
608         spin_lock_irqsave(&tp->indirect_lock, flags);
609         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
610                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
611                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
612
613                 /* Always leave this as zero. */
614                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
615         } else {
616                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
617                 *val = tr32(TG3PCI_MEM_WIN_DATA);
618
619                 /* Always leave this as zero. */
620                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
621         }
622         spin_unlock_irqrestore(&tp->indirect_lock, flags);
623 }
624
625 static void tg3_ape_lock_init(struct tg3 *tp)
626 {
627         int i;
628         u32 regbase, bit;
629
630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
631                 regbase = TG3_APE_LOCK_GRANT;
632         else
633                 regbase = TG3_APE_PER_LOCK_GRANT;
634
635         /* Make sure the driver doesn't hold any stale locks. */
636         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
637                 switch (i) {
638                 case TG3_APE_LOCK_PHY0:
639                 case TG3_APE_LOCK_PHY1:
640                 case TG3_APE_LOCK_PHY2:
641                 case TG3_APE_LOCK_PHY3:
642                         bit = APE_LOCK_GRANT_DRIVER;
643                         break;
644                 default:
645                         if (!tp->pci_fn)
646                                 bit = APE_LOCK_GRANT_DRIVER;
647                         else
648                                 bit = 1 << tp->pci_fn;
649                 }
650                 tg3_ape_write32(tp, regbase + 4 * i, bit);
651         }
653 }
654
655 static int tg3_ape_lock(struct tg3 *tp, int locknum)
656 {
657         int i, off;
658         int ret = 0;
659         u32 status, req, gnt, bit;
660
661         if (!tg3_flag(tp, ENABLE_APE))
662                 return 0;
663
664         switch (locknum) {
665         case TG3_APE_LOCK_GPIO:
666                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
667                         return 0;
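                /* else: fall through */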
668         case TG3_APE_LOCK_GRC:
669         case TG3_APE_LOCK_MEM:
670                 if (!tp->pci_fn)
671                         bit = APE_LOCK_REQ_DRIVER;
672                 else
673                         bit = 1 << tp->pci_fn;
674                 break;
675         case TG3_APE_LOCK_PHY0:
676         case TG3_APE_LOCK_PHY1:
677         case TG3_APE_LOCK_PHY2:
678         case TG3_APE_LOCK_PHY3:
679                 bit = APE_LOCK_REQ_DRIVER;
680                 break;
681         default:
682                 return -EINVAL;
683         }
684
685         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
686                 req = TG3_APE_LOCK_REQ;
687                 gnt = TG3_APE_LOCK_GRANT;
688         } else {
689                 req = TG3_APE_PER_LOCK_REQ;
690                 gnt = TG3_APE_PER_LOCK_GRANT;
691         }
692
693         off = 4 * locknum;
694
695         tg3_ape_write32(tp, req + off, bit);
696
697         /* Wait for up to 1 millisecond to acquire lock. */
698         for (i = 0; i < 100; i++) {
699                 status = tg3_ape_read32(tp, gnt + off);
700                 if (status == bit)
701                         break;
702                 udelay(10);
703         }
704
705         if (status != bit) {
706                 /* Revoke the lock request. */
707                 tg3_ape_write32(tp, gnt + off, bit);
708                 ret = -EBUSY;
709         }
710
711         return ret;
712 }
713
714 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
715 {
716         u32 gnt, bit;
717
718         if (!tg3_flag(tp, ENABLE_APE))
719                 return;
720
721         switch (locknum) {
722         case TG3_APE_LOCK_GPIO:
723                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
724                         return;
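                /* else: fall through */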
725         case TG3_APE_LOCK_GRC:
726         case TG3_APE_LOCK_MEM:
727                 if (!tp->pci_fn)
728                         bit = APE_LOCK_GRANT_DRIVER;
729                 else
730                         bit = 1 << tp->pci_fn;
731                 break;
732         case TG3_APE_LOCK_PHY0:
733         case TG3_APE_LOCK_PHY1:
734         case TG3_APE_LOCK_PHY2:
735         case TG3_APE_LOCK_PHY3:
736                 bit = APE_LOCK_GRANT_DRIVER;
737                 break;
738         default:
739                 return;
740         }
741
742         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
743                 gnt = TG3_APE_LOCK_GRANT;
744         else
745                 gnt = TG3_APE_PER_LOCK_GRANT;
746
747         tg3_ape_write32(tp, gnt + 4 * locknum, bit);
748 }
749
750 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
751 {
752         u32 apedata;
753
754         while (timeout_us) {
755                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
756                         return -EBUSY;
757
758                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
759                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
760                         break;
761
762                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
763
764                 udelay(10);
765                 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
766         }
767
768         return timeout_us ? 0 : -EBUSY;
769 }
770
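/* Poll until the APE clears its event-pending bit.  Returns zero once the
 * previous event has been serviced, nonzero if timeout_us expires first.
 */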
771 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
772 {
773         u32 i, apedata;
774
775         for (i = 0; i < timeout_us / 10; i++) {
776                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
777
778                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
779                         break;
780
781                 udelay(10);
782         }
783
784         return i == timeout_us / 10;
785 }
786
787 int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len)
788 {
789         int err;
790         u32 i, bufoff, msgoff, maxlen, apedata;
791
792         if (!tg3_flag(tp, APE_HAS_NCSI))
793                 return 0;
794
795         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
796         if (apedata != APE_SEG_SIG_MAGIC)
797                 return -ENODEV;
798
799         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
800         if (!(apedata & APE_FW_STATUS_READY))
801                 return -EAGAIN;
802
803         bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
804                  TG3_APE_SHMEM_BASE;
805         msgoff = bufoff + 2 * sizeof(u32);
806         maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
807
808         while (len) {
809                 u32 length;
810
811                 /* Cap xfer sizes to scratchpad limits. */
812                 length = (len > maxlen) ? maxlen : len;
813                 len -= length;
814
815                 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
816                 if (!(apedata & APE_FW_STATUS_READY))
817                         return -EAGAIN;
818
819                 /* Wait for up to 1 msec for APE to service previous event. */
820                 err = tg3_ape_event_lock(tp, 1000);
821                 if (err)
822                         return err;
823
824                 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
825                           APE_EVENT_STATUS_SCRTCHPD_READ |
826                           APE_EVENT_STATUS_EVENT_PENDING;
827                 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
828
829                 tg3_ape_write32(tp, bufoff, base_off);
830                 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
831
832                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
833                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
834
835                 base_off += length;
836
837                 if (tg3_ape_wait_for_event(tp, 30000))
838                         return -EAGAIN;
839
840                 for (i = 0; length; i += 4, length -= 4) {
841                         u32 val = tg3_ape_read32(tp, msgoff + i);
842                         memcpy(data, &val, sizeof(u32));
843                         data++;
844                 }
845         }
846
847         return 0;
848 }
849
850 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
851 {
852         int err;
853         u32 apedata;
854
855         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
856         if (apedata != APE_SEG_SIG_MAGIC)
857                 return -EAGAIN;
858
859         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
860         if (!(apedata & APE_FW_STATUS_READY))
861                 return -EAGAIN;
862
863         /* Wait for up to 1 millisecond for APE to service previous event. */
864         err = tg3_ape_event_lock(tp, 1000);
865         if (err)
866                 return err;
867
868         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
869                         event | APE_EVENT_STATUS_EVENT_PENDING);
870
871         tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
872         tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
873
874         return 0;
875 }
876
877 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
878 {
879         u32 event;
880         u32 apedata;
881
882         if (!tg3_flag(tp, ENABLE_APE))
883                 return;
884
885         switch (kind) {
886         case RESET_KIND_INIT:
887                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
888                                 APE_HOST_SEG_SIG_MAGIC);
889                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
890                                 APE_HOST_SEG_LEN_MAGIC);
891                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
892                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
893                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
894                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
895                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
896                                 APE_HOST_BEHAV_NO_PHYLOCK);
897                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
898                                     TG3_APE_HOST_DRVR_STATE_START);
899
900                 event = APE_EVENT_STATUS_STATE_START;
901                 break;
902         case RESET_KIND_SHUTDOWN:
903                 /* With the interface we are currently using,
904                  * APE does not track driver state.  Wiping
905                  * out the HOST SEGMENT SIGNATURE forces
906                  * the APE to assume OS absent status.
907                  */
908                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
909
910                 if (device_may_wakeup(&tp->pdev->dev) &&
911                     tg3_flag(tp, WOL_ENABLE)) {
912                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
913                                             TG3_APE_HOST_WOL_SPEED_AUTO);
914                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
915                 } else
916                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
917
918                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
919
920                 event = APE_EVENT_STATUS_STATE_UNLOAD;
921                 break;
922         case RESET_KIND_SUSPEND:
923                 event = APE_EVENT_STATUS_STATE_SUSPEND;
924                 break;
925         default:
926                 return;
927         }
928
929         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
930
931         tg3_ape_send_event(tp, event);
932 }
933
934 static void tg3_disable_ints(struct tg3 *tp)
935 {
936         int i;
937
938         tw32(TG3PCI_MISC_HOST_CTRL,
939              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
940         for (i = 0; i < tp->irq_max; i++)
941                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
942 }
943
944 static void tg3_enable_ints(struct tg3 *tp)
945 {
946         int i;
947
948         tp->irq_sync = 0;
949         wmb();
950
951         tw32(TG3PCI_MISC_HOST_CTRL,
952              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
953
954         tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
955         for (i = 0; i < tp->irq_cnt; i++) {
956                 struct tg3_napi *tnapi = &tp->napi[i];
957
958                 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
959                 if (tg3_flag(tp, 1SHOT_MSI))
960                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
961
962                 tp->coal_now |= tnapi->coal_now;
963         }
964
965         /* Force an initial interrupt */
966         if (!tg3_flag(tp, TAGGED_STATUS) &&
967             (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
968                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
969         else
970                 tw32(HOSTCC_MODE, tp->coal_now);
971
972         tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
973 }
974
975 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
976 {
977         struct tg3 *tp = tnapi->tp;
978         struct tg3_hw_status *sblk = tnapi->hw_status;
979         unsigned int work_exists = 0;
980
981         /* check for phy events */
982         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
983                 if (sblk->status & SD_STATUS_LINK_CHG)
984                         work_exists = 1;
985         }
986
987         /* check for TX work to do */
988         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
989                 work_exists = 1;
990
991         /* check for RX work to do */
992         if (tnapi->rx_rcb_prod_idx &&
993             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
994                 work_exists = 1;
995
996         return work_exists;
997 }
998
999 /* tg3_int_reenable
1000  *  similar to tg3_enable_ints, but it accurately determines whether there
1001  *  is new work pending and can return without flushing the PIO write
1002  *  which reenables interrupts
1003  */
1004 static void tg3_int_reenable(struct tg3_napi *tnapi)
1005 {
1006         struct tg3 *tp = tnapi->tp;
1007
1008         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1009         mmiowb();
1010
1011         /* When doing tagged status, this work check is unnecessary.
1012          * The last_tag we write above tells the chip which piece of
1013          * work we've completed.
1014          */
1015         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1016                 tw32(HOSTCC_MODE, tp->coalesce_mode |
1017                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
1018 }
1019
1020 static void tg3_switch_clocks(struct tg3 *tp)
1021 {
1022         u32 clock_ctrl;
1023         u32 orig_clock_ctrl;
1024
1025         if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1026                 return;
1027
1028         clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1029
1030         orig_clock_ctrl = clock_ctrl;
1031         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1032                        CLOCK_CTRL_CLKRUN_OENABLE |
1033                        0x1f);
1034         tp->pci_clock_ctrl = clock_ctrl;
1035
1036         if (tg3_flag(tp, 5705_PLUS)) {
1037                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1038                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1039                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1040                 }
1041         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1042                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1043                             clock_ctrl |
1044                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1045                             40);
1046                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1047                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
1048                             40);
1049         }
1050         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1051 }
1052
1053 #define PHY_BUSY_LOOPS  5000
1054
1055 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1056 {
1057         u32 frame_val;
1058         unsigned int loops;
1059         int ret;
1060
1061         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1062                 tw32_f(MAC_MI_MODE,
1063                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1064                 udelay(80);
1065         }
1066
1067         tg3_ape_lock(tp, tp->phy_ape_lock);
1068
1069         *val = 0x0;
1070
1071         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1072                       MI_COM_PHY_ADDR_MASK);
1073         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1074                       MI_COM_REG_ADDR_MASK);
1075         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1076
1077         tw32_f(MAC_MI_COM, frame_val);
1078
1079         loops = PHY_BUSY_LOOPS;
1080         while (loops != 0) {
1081                 udelay(10);
1082                 frame_val = tr32(MAC_MI_COM);
1083
1084                 if ((frame_val & MI_COM_BUSY) == 0) {
1085                         udelay(5);
1086                         frame_val = tr32(MAC_MI_COM);
1087                         break;
1088                 }
1089                 loops -= 1;
1090         }
1091
1092         ret = -EBUSY;
1093         if (loops != 0) {
1094                 *val = frame_val & MI_COM_DATA_MASK;
1095                 ret = 0;
1096         }
1097
1098         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1099                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1100                 udelay(80);
1101         }
1102
1103         tg3_ape_unlock(tp, tp->phy_ape_lock);
1104
1105         return ret;
1106 }
1107
1108 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1109 {
1110         u32 frame_val;
1111         unsigned int loops;
1112         int ret;
1113
1114         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1115             (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1116                 return 0;
1117
1118         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1119                 tw32_f(MAC_MI_MODE,
1120                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1121                 udelay(80);
1122         }
1123
1124         tg3_ape_lock(tp, tp->phy_ape_lock);
1125
1126         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1127                       MI_COM_PHY_ADDR_MASK);
1128         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1129                       MI_COM_REG_ADDR_MASK);
1130         frame_val |= (val & MI_COM_DATA_MASK);
1131         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1132
1133         tw32_f(MAC_MI_COM, frame_val);
1134
1135         loops = PHY_BUSY_LOOPS;
1136         while (loops != 0) {
1137                 udelay(10);
1138                 frame_val = tr32(MAC_MI_COM);
1139                 if ((frame_val & MI_COM_BUSY) == 0) {
1140                         udelay(5);
1141                         frame_val = tr32(MAC_MI_COM);
1142                         break;
1143                 }
1144                 loops -= 1;
1145         }
1146
1147         ret = -EBUSY;
1148         if (loops != 0)
1149                 ret = 0;
1150
1151         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1152                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1153                 udelay(80);
1154         }
1155
1156         tg3_ape_unlock(tp, tp->phy_ape_lock);
1157
1158         return ret;
1159 }
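
/* Illustrative read-modify-write through the MI interface (a sketch, not
 * code from this driver): restart autonegotiation on the attached PHY.
 *
 *      u32 bmcr;
 *
 *      if (!tg3_readphy(tp, MII_BMCR, &bmcr))
 *              tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
 */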
1160
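/* Clause 45 MMD registers are reached indirectly through the clause 22
 * MMD access registers: select the device address (devad), latch the
 * register address, then switch to data mode without post-increment.
 */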
1161 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1162 {
1163         int err;
1164
1165         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1166         if (err)
1167                 goto done;
1168
1169         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1170         if (err)
1171                 goto done;
1172
1173         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1174                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1175         if (err)
1176                 goto done;
1177
1178         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1179
1180 done:
1181         return err;
1182 }
1183
1184 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1185 {
1186         int err;
1187
1188         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1189         if (err)
1190                 goto done;
1191
1192         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1193         if (err)
1194                 goto done;
1195
1196         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1197                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1198         if (err)
1199                 goto done;
1200
1201         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1202
1203 done:
1204         return err;
1205 }
1206
1207 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1208 {
1209         int err;
1210
1211         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1212         if (!err)
1213                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1214
1215         return err;
1216 }
1217
1218 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1219 {
1220         int err;
1221
1222         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1223         if (!err)
1224                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1225
1226         return err;
1227 }
1228
1229 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1230 {
1231         int err;
1232
1233         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1234                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1235                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1236         if (!err)
1237                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1238
1239         return err;
1240 }
1241
1242 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1243 {
1244         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1245                 set |= MII_TG3_AUXCTL_MISC_WREN;
1246
1247         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1248 }
1249
1250 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1251         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1252                              MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1253                              MII_TG3_AUXCTL_ACTL_TX_6DB)
1254
1255 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1256         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1257                              MII_TG3_AUXCTL_ACTL_TX_6DB)
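
/* Typical pairing (a sketch following the driver's own usage): DSP writes
 * are bracketed by SMDSP enable/disable, e.g.
 *
 *      if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 *              tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
 *              TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *      }
 */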
1258
1259 static int tg3_bmcr_reset(struct tg3 *tp)
1260 {
1261         u32 phy_control;
1262         int limit, err;
1263
1264         /* OK, reset it, and poll the BMCR_RESET bit until it
1265          * clears or we time out.
1266          */
1267         phy_control = BMCR_RESET;
1268         err = tg3_writephy(tp, MII_BMCR, phy_control);
1269         if (err != 0)
1270                 return -EBUSY;
1271
1272         limit = 5000;
1273         while (limit--) {
1274                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1275                 if (err != 0)
1276                         return -EBUSY;
1277
1278                 if ((phy_control & BMCR_RESET) == 0) {
1279                         udelay(40);
1280                         break;
1281                 }
1282                 udelay(10);
1283         }
1284         if (limit < 0)
1285                 return -EBUSY;
1286
1287         return 0;
1288 }
1289
1290 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1291 {
1292         struct tg3 *tp = bp->priv;
1293         u32 val;
1294
1295         spin_lock_bh(&tp->lock);
1296
1297         if (tg3_readphy(tp, reg, &val))
1298                 val = -EIO;
1299
1300         spin_unlock_bh(&tp->lock);
1301
1302         return val;
1303 }
1304
1305 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1306 {
1307         struct tg3 *tp = bp->priv;
1308         u32 ret = 0;
1309
1310         spin_lock_bh(&tp->lock);
1311
1312         if (tg3_writephy(tp, reg, val))
1313                 ret = -EIO;
1314
1315         spin_unlock_bh(&tp->lock);
1316
1317         return ret;
1318 }
1319
1320 static int tg3_mdio_reset(struct mii_bus *bp)
1321 {
1322         return 0;
1323 }
1324
1325 static void tg3_mdio_config_5785(struct tg3 *tp)
1326 {
1327         u32 val;
1328         struct phy_device *phydev;
1329
1330         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1331         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1332         case PHY_ID_BCM50610:
1333         case PHY_ID_BCM50610M:
1334                 val = MAC_PHYCFG2_50610_LED_MODES;
1335                 break;
1336         case PHY_ID_BCMAC131:
1337                 val = MAC_PHYCFG2_AC131_LED_MODES;
1338                 break;
1339         case PHY_ID_RTL8211C:
1340                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1341                 break;
1342         case PHY_ID_RTL8201E:
1343                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1344                 break;
1345         default:
1346                 return;
1347         }
1348
1349         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1350                 tw32(MAC_PHYCFG2, val);
1351
1352                 val = tr32(MAC_PHYCFG1);
1353                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1354                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1355                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1356                 tw32(MAC_PHYCFG1, val);
1357
1358                 return;
1359         }
1360
1361         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1362                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1363                        MAC_PHYCFG2_FMODE_MASK_MASK |
1364                        MAC_PHYCFG2_GMODE_MASK_MASK |
1365                        MAC_PHYCFG2_ACT_MASK_MASK   |
1366                        MAC_PHYCFG2_QUAL_MASK_MASK |
1367                        MAC_PHYCFG2_INBAND_ENABLE;
1368
1369         tw32(MAC_PHYCFG2, val);
1370
1371         val = tr32(MAC_PHYCFG1);
1372         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1373                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1374         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1375                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1376                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1377                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1378                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1379         }
1380         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1381                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1382         tw32(MAC_PHYCFG1, val);
1383
1384         val = tr32(MAC_EXT_RGMII_MODE);
1385         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1386                  MAC_RGMII_MODE_RX_QUALITY |
1387                  MAC_RGMII_MODE_RX_ACTIVITY |
1388                  MAC_RGMII_MODE_RX_ENG_DET |
1389                  MAC_RGMII_MODE_TX_ENABLE |
1390                  MAC_RGMII_MODE_TX_LOWPWR |
1391                  MAC_RGMII_MODE_TX_RESET);
1392         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1393                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1394                         val |= MAC_RGMII_MODE_RX_INT_B |
1395                                MAC_RGMII_MODE_RX_QUALITY |
1396                                MAC_RGMII_MODE_RX_ACTIVITY |
1397                                MAC_RGMII_MODE_RX_ENG_DET;
1398                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1399                         val |= MAC_RGMII_MODE_TX_ENABLE |
1400                                MAC_RGMII_MODE_TX_LOWPWR |
1401                                MAC_RGMII_MODE_TX_RESET;
1402         }
1403         tw32(MAC_EXT_RGMII_MODE, val);
1404 }
1405
1406 static void tg3_mdio_start(struct tg3 *tp)
1407 {
1408         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1409         tw32_f(MAC_MI_MODE, tp->mi_mode);
1410         udelay(80);
1411
1412         if (tg3_flag(tp, MDIOBUS_INITED) &&
1413             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1414                 tg3_mdio_config_5785(tp);
1415 }
1416
1417 static int tg3_mdio_init(struct tg3 *tp)
1418 {
1419         int i;
1420         u32 reg;
1421         struct phy_device *phydev;
1422
1423         if (tg3_flag(tp, 5717_PLUS)) {
1424                 u32 is_serdes;
1425
1426                 tp->phy_addr = tp->pci_fn + 1;
1427
1428                 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1429                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1430                 else
1431                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1432                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1433                 if (is_serdes)
1434                         tp->phy_addr += 7;
1435         } else
1436                 tp->phy_addr = TG3_PHY_MII_ADDR;
1437
1438         tg3_mdio_start(tp);
1439
1440         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1441                 return 0;
1442
1443         tp->mdio_bus = mdiobus_alloc();
1444         if (tp->mdio_bus == NULL)
1445                 return -ENOMEM;
1446
1447         tp->mdio_bus->name     = "tg3 mdio bus";
1448         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1449                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1450         tp->mdio_bus->priv     = tp;
1451         tp->mdio_bus->parent   = &tp->pdev->dev;
1452         tp->mdio_bus->read     = &tg3_mdio_read;
1453         tp->mdio_bus->write    = &tg3_mdio_write;
1454         tp->mdio_bus->reset    = &tg3_mdio_reset;
1455         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1456         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1457
1458         for (i = 0; i < PHY_MAX_ADDR; i++)
1459                 tp->mdio_bus->irq[i] = PHY_POLL;
1460
1461         /* The bus registration will look for all the PHYs on the mdio bus.
1462          * Unfortunately, it does not ensure the PHY is powered up before
1463          * accessing the PHY ID registers.  A chip reset is the
1464          * quickest way to bring the device back to an operational state.
1465          */
1466         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1467                 tg3_bmcr_reset(tp);
1468
1469         i = mdiobus_register(tp->mdio_bus);
1470         if (i) {
1471                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1472                 mdiobus_free(tp->mdio_bus);
1473                 return i;
1474         }
1475
1476         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1477
1478         if (!phydev || !phydev->drv) {
1479                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1480                 mdiobus_unregister(tp->mdio_bus);
1481                 mdiobus_free(tp->mdio_bus);
1482                 return -ENODEV;
1483         }
1484
1485         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1486         case PHY_ID_BCM57780:
1487                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1488                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1489                 break;
1490         case PHY_ID_BCM50610:
1491         case PHY_ID_BCM50610M:
1492                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1493                                      PHY_BRCM_RX_REFCLK_UNUSED |
1494                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1495                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1496                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1497                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1498                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1499                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1500                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1501                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1502                 /* fallthru */
1503         case PHY_ID_RTL8211C:
1504                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1505                 break;
1506         case PHY_ID_RTL8201E:
1507         case PHY_ID_BCMAC131:
1508                 phydev->interface = PHY_INTERFACE_MODE_MII;
1509                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1510                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1511                 break;
1512         }
1513
1514         tg3_flag_set(tp, MDIOBUS_INITED);
1515
1516         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1517                 tg3_mdio_config_5785(tp);
1518
1519         return 0;
1520 }
1521
1522 static void tg3_mdio_fini(struct tg3 *tp)
1523 {
1524         if (tg3_flag(tp, MDIOBUS_INITED)) {
1525                 tg3_flag_clear(tp, MDIOBUS_INITED);
1526                 mdiobus_unregister(tp->mdio_bus);
1527                 mdiobus_free(tp->mdio_bus);
1528         }
1529 }
1530
1531 /* tp->lock is held. */
1532 static inline void tg3_generate_fw_event(struct tg3 *tp)
1533 {
1534         u32 val;
1535
1536         val = tr32(GRC_RX_CPU_EVENT);
1537         val |= GRC_RX_CPU_DRIVER_EVENT;
1538         tw32_f(GRC_RX_CPU_EVENT, val);
1539
1540         tp->last_event_jiffies = jiffies;
1541 }
1542
1543 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1544
1545 /* tp->lock is held. */
1546 static void tg3_wait_for_event_ack(struct tg3 *tp)
1547 {
1548         int i;
1549         unsigned int delay_cnt;
1550         long time_remain;
1551
1552         /* If enough time has passed, no wait is necessary. */
1553         time_remain = (long)(tp->last_event_jiffies + 1 +
1554                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1555                       (long)jiffies;
1556         if (time_remain < 0)
1557                 return;
1558
1559         /* Check if we can shorten the wait time. */
1560         delay_cnt = jiffies_to_usecs(time_remain);
1561         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1562                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
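        /* Convert the remaining usec budget into udelay(8) polls,
         * rounding up; e.g. a full 2500 usec budget becomes
         * (2500 >> 3) + 1 = 313 polls of 8 usec each.
         */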
1563         delay_cnt = (delay_cnt >> 3) + 1;
1564
1565         for (i = 0; i < delay_cnt; i++) {
1566                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1567                         break;
1568                 udelay(8);
1569         }
1570 }
1571
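/* Snapshot the MII negotiation registers into four 32-bit words for
 * the management firmware: BMCR|BMSR, ADVERTISE|LPA, CTRL1000|STAT1000
 * (skipped on MII serdes PHYs), and PHYADDR in the upper half of the
 * last word.  A failed PHY read leaves zeroes in its slot.
 */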
1572 /* tp->lock is held. */
1573 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1574 {
1575         u32 reg, val;
1576
1577         val = 0;
1578         if (!tg3_readphy(tp, MII_BMCR, &reg))
1579                 val = reg << 16;
1580         if (!tg3_readphy(tp, MII_BMSR, &reg))
1581                 val |= (reg & 0xffff);
1582         *data++ = val;
1583
1584         val = 0;
1585         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1586                 val = reg << 16;
1587         if (!tg3_readphy(tp, MII_LPA, &reg))
1588                 val |= (reg & 0xffff);
1589         *data++ = val;
1590
1591         val = 0;
1592         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1593                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1594                         val = reg << 16;
1595                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1596                         val |= (reg & 0xffff);
1597         }
1598         *data++ = val;
1599
1600         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1601                 val = reg << 16;
1602         else
1603                 val = 0;
1604         *data++ = val;
1605 }
1606
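/* Mailbox handshake with the management firmware: wait for the
 * previous driver event to be acknowledged, deposit the link-update
 * command, length, and data words into NIC SRAM, then raise
 * GRC_RX_CPU_DRIVER_EVENT to notify the RX CPU.
 */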
1607 /* tp->lock is held. */
1608 static void tg3_ump_link_report(struct tg3 *tp)
1609 {
1610         u32 data[4];
1611
1612         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1613                 return;
1614
1615         tg3_phy_gather_ump_data(tp, data);
1616
1617         tg3_wait_for_event_ack(tp);
1618
1619         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1620         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1621         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1622         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1623         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1624         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1625
1626         tg3_generate_fw_event(tp);
1627 }
1628
1629 /* tp->lock is held. */
1630 static void tg3_stop_fw(struct tg3 *tp)
1631 {
1632         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1633                 /* Wait for RX cpu to ACK the previous event. */
1634                 tg3_wait_for_event_ack(tp);
1635
1636                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1637
1638                 tg3_generate_fw_event(tp);
1639
1640                 /* Wait for RX cpu to ACK this event. */
1641                 tg3_wait_for_event_ack(tp);
1642         }
1643 }
1644
1645 /* tp->lock is held. */
1646 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1647 {
1648         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1649                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1650
1651         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1652                 switch (kind) {
1653                 case RESET_KIND_INIT:
1654                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1655                                       DRV_STATE_START);
1656                         break;
1657
1658                 case RESET_KIND_SHUTDOWN:
1659                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1660                                       DRV_STATE_UNLOAD);
1661                         break;
1662
1663                 case RESET_KIND_SUSPEND:
1664                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1665                                       DRV_STATE_SUSPEND);
1666                         break;
1667
1668                 default:
1669                         break;
1670                 }
1671         }
1672
1673         if (kind == RESET_KIND_INIT ||
1674             kind == RESET_KIND_SUSPEND)
1675                 tg3_ape_driver_state_change(tp, kind);
1676 }
1677
1678 /* tp->lock is held. */
1679 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1680 {
1681         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1682                 switch (kind) {
1683                 case RESET_KIND_INIT:
1684                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1685                                       DRV_STATE_START_DONE);
1686                         break;
1687
1688                 case RESET_KIND_SHUTDOWN:
1689                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1690                                       DRV_STATE_UNLOAD_DONE);
1691                         break;
1692
1693                 default:
1694                         break;
1695                 }
1696         }
1697
1698         if (kind == RESET_KIND_SHUTDOWN)
1699                 tg3_ape_driver_state_change(tp, kind);
1700 }
1701
1702 /* tp->lock is held. */
1703 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1704 {
1705         if (tg3_flag(tp, ENABLE_ASF)) {
1706                 switch (kind) {
1707                 case RESET_KIND_INIT:
1708                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1709                                       DRV_STATE_START);
1710                         break;
1711
1712                 case RESET_KIND_SHUTDOWN:
1713                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1714                                       DRV_STATE_UNLOAD);
1715                         break;
1716
1717                 case RESET_KIND_SUSPEND:
1718                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1719                                       DRV_STATE_SUSPEND);
1720                         break;
1721
1722                 default:
1723                         break;
1724                 }
1725         }
1726 }
1727
1728 static int tg3_poll_fw(struct tg3 *tp)
1729 {
1730         int i;
1731         u32 val;
1732
1733         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1734                 /* Wait up to 20ms for init done. */
1735                 for (i = 0; i < 200; i++) {
1736                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1737                                 return 0;
1738                         udelay(100);
1739                 }
1740                 return -ENODEV;
1741         }
1742
1743         /* Wait up to 1 s (100000 * 10 usec) for firmware initialization to complete. */
1744         for (i = 0; i < 100000; i++) {
1745                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1746                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1747                         break;
1748                 udelay(10);
1749         }
1750
1751         /* Chip might not be fitted with firmware.  Some Sun onboard
1752          * parts are configured like that.  So don't signal the timeout
1753          * of the above loop as an error, but do report the lack of
1754          * running firmware once.
1755          */
1756         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1757                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1758
1759                 netdev_info(tp->dev, "No firmware running\n");
1760         }
1761
1762         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1763                 /* The 57765 A0 needs a little more time to
1764                  * finish its internal initialization.
1765                  */
1766                 mdelay(10);
1767         }
1768
1769         return 0;
1770 }
1771
1772 static void tg3_link_report(struct tg3 *tp)
1773 {
1774         if (!netif_carrier_ok(tp->dev)) {
1775                 netif_info(tp, link, tp->dev, "Link is down\n");
1776                 tg3_ump_link_report(tp);
1777         } else if (netif_msg_link(tp)) {
1778                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1779                             (tp->link_config.active_speed == SPEED_1000 ?
1780                              1000 :
1781                              (tp->link_config.active_speed == SPEED_100 ?
1782                               100 : 10)),
1783                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1784                              "full" : "half"));
1785
1786                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1787                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1788                             "on" : "off",
1789                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1790                             "on" : "off");
1791
1792                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1793                         netdev_info(tp->dev, "EEE is %s\n",
1794                                     tp->setlpicnt ? "enabled" : "disabled");
1795
1796                 tg3_ump_link_report(tp);
1797         }
1798 }
1799
1800 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1801 {
1802         u16 miireg;
1803
1804         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1805                 miireg = ADVERTISE_1000XPAUSE;
1806         else if (flow_ctrl & FLOW_CTRL_TX)
1807                 miireg = ADVERTISE_1000XPSE_ASYM;
1808         else if (flow_ctrl & FLOW_CTRL_RX)
1809                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1810         else
1811                 miireg = 0;
1812
1813         return miireg;
1814 }
1815
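/* Resolve 1000BASE-X pause negotiation: symmetric pause advertised by
 * both ends enables flow control in both directions.  Otherwise, if
 * both ends advertise asymmetric pause, the lone symmetric-pause bit
 * picks the single direction (local -> FLOW_CTRL_RX, remote ->
 * FLOW_CTRL_TX), mirroring mii_resolve_flowctrl_fdx().
 */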
1816 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1817 {
1818         u8 cap = 0;
1819
1820         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1821                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1822         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1823                 if (lcladv & ADVERTISE_1000XPAUSE)
1824                         cap = FLOW_CTRL_RX;
1825                 if (rmtadv & ADVERTISE_1000XPAUSE)
1826                         cap = FLOW_CTRL_TX;
1827         }
1828
1829         return cap;
1830 }
1831
1832 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1833 {
1834         u8 autoneg;
1835         u8 flowctrl = 0;
1836         u32 old_rx_mode = tp->rx_mode;
1837         u32 old_tx_mode = tp->tx_mode;
1838
1839         if (tg3_flag(tp, USE_PHYLIB))
1840                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1841         else
1842                 autoneg = tp->link_config.autoneg;
1843
1844         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1845                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1846                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1847                 else
1848                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1849         } else
1850                 flowctrl = tp->link_config.flowctrl;
1851
1852         tp->link_config.active_flowctrl = flowctrl;
1853
1854         if (flowctrl & FLOW_CTRL_RX)
1855                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1856         else
1857                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1858
1859         if (old_rx_mode != tp->rx_mode)
1860                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1861
1862         if (flowctrl & FLOW_CTRL_TX)
1863                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1864         else
1865                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1866
1867         if (old_tx_mode != tp->tx_mode)
1868                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1869 }
1870
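/* phylib link-change callback.  Under tp->lock, mirror the PHY's
 * current speed and duplex into MAC_MODE, redo flow-control
 * resolution, and retune MAC_MI_STAT and the TX slot time; any link
 * change is reported only after the lock is dropped.
 */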
1871 static void tg3_adjust_link(struct net_device *dev)
1872 {
1873         u8 oldflowctrl, linkmesg = 0;
1874         u32 mac_mode, lcl_adv, rmt_adv;
1875         struct tg3 *tp = netdev_priv(dev);
1876         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1877
1878         spin_lock_bh(&tp->lock);
1879
1880         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1881                                     MAC_MODE_HALF_DUPLEX);
1882
1883         oldflowctrl = tp->link_config.active_flowctrl;
1884
1885         if (phydev->link) {
1886                 lcl_adv = 0;
1887                 rmt_adv = 0;
1888
1889                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1890                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1891                 else if (phydev->speed == SPEED_1000 ||
1892                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1893                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1894                 else
1895                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1896
1897                 if (phydev->duplex == DUPLEX_HALF)
1898                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1899                 else {
1900                         lcl_adv = mii_advertise_flowctrl(
1901                                   tp->link_config.flowctrl);
1902
1903                         if (phydev->pause)
1904                                 rmt_adv = LPA_PAUSE_CAP;
1905                         if (phydev->asym_pause)
1906                                 rmt_adv |= LPA_PAUSE_ASYM;
1907                 }
1908
1909                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1910         } else
1911                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1912
1913         if (mac_mode != tp->mac_mode) {
1914                 tp->mac_mode = mac_mode;
1915                 tw32_f(MAC_MODE, tp->mac_mode);
1916                 udelay(40);
1917         }
1918
1919         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1920                 if (phydev->speed == SPEED_10)
1921                         tw32(MAC_MI_STAT,
1922                              MAC_MI_STAT_10MBPS_MODE |
1923                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1924                 else
1925                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1926         }
1927
1928         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1929                 tw32(MAC_TX_LENGTHS,
1930                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1931                       (6 << TX_LENGTHS_IPG_SHIFT) |
1932                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1933         else
1934                 tw32(MAC_TX_LENGTHS,
1935                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1936                       (6 << TX_LENGTHS_IPG_SHIFT) |
1937                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1938
1939         if (phydev->link != tp->old_link ||
1940             phydev->speed != tp->link_config.active_speed ||
1941             phydev->duplex != tp->link_config.active_duplex ||
1942             oldflowctrl != tp->link_config.active_flowctrl)
1943                 linkmesg = 1;
1944
1945         tp->old_link = phydev->link;
1946         tp->link_config.active_speed = phydev->speed;
1947         tp->link_config.active_duplex = phydev->duplex;
1948
1949         spin_unlock_bh(&tp->lock);
1950
1951         if (linkmesg)
1952                 tg3_link_report(tp);
1953 }
1954
1955 static int tg3_phy_init(struct tg3 *tp)
1956 {
1957         struct phy_device *phydev;
1958
1959         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1960                 return 0;
1961
1962         /* Bring the PHY back to a known state. */
1963         tg3_bmcr_reset(tp);
1964
1965         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1966
1967         /* Attach the MAC to the PHY. */
1968         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1969                              phydev->dev_flags, phydev->interface);
1970         if (IS_ERR(phydev)) {
1971                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1972                 return PTR_ERR(phydev);
1973         }
1974
1975         /* Mask with MAC supported features. */
1976         switch (phydev->interface) {
1977         case PHY_INTERFACE_MODE_GMII:
1978         case PHY_INTERFACE_MODE_RGMII:
1979                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1980                         phydev->supported &= (PHY_GBIT_FEATURES |
1981                                               SUPPORTED_Pause |
1982                                               SUPPORTED_Asym_Pause);
1983                         break;
1984                 }
1985                 /* fallthru */
1986         case PHY_INTERFACE_MODE_MII:
1987                 phydev->supported &= (PHY_BASIC_FEATURES |
1988                                       SUPPORTED_Pause |
1989                                       SUPPORTED_Asym_Pause);
1990                 break;
1991         default:
1992                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1993                 return -EINVAL;
1994         }
1995
1996         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1997
1998         phydev->advertising = phydev->supported;
1999
2000         return 0;
2001 }
2002
2003 static void tg3_phy_start(struct tg3 *tp)
2004 {
2005         struct phy_device *phydev;
2006
2007         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2008                 return;
2009
2010         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2011
2012         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2013                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2014                 phydev->speed = tp->link_config.speed;
2015                 phydev->duplex = tp->link_config.duplex;
2016                 phydev->autoneg = tp->link_config.autoneg;
2017                 phydev->advertising = tp->link_config.advertising;
2018         }
2019
2020         phy_start(phydev);
2021
2022         phy_start_aneg(phydev);
2023 }
2024
2025 static void tg3_phy_stop(struct tg3 *tp)
2026 {
2027         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2028                 return;
2029
2030         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2031 }
2032
2033 static void tg3_phy_fini(struct tg3 *tp)
2034 {
2035         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2036                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2037                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2038         }
2039 }
2040
2041 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2042 {
2043         int err;
2044         u32 val;
2045
2046         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2047                 return 0;
2048
2049         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2050                 /* Cannot do read-modify-write on 5401 */
2051                 err = tg3_phy_auxctl_write(tp,
2052                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2053                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2054                                            0x4c20);
2055                 goto done;
2056         }
2057
2058         err = tg3_phy_auxctl_read(tp,
2059                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2060         if (err)
2061                 return err;
2062
2063         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2064         err = tg3_phy_auxctl_write(tp,
2065                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2066
2067 done:
2068         return err;
2069 }
2070
2071 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2072 {
2073         u32 phytest;
2074
2075         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2076                 u32 phy;
2077
2078                 tg3_writephy(tp, MII_TG3_FET_TEST,
2079                              phytest | MII_TG3_FET_SHADOW_EN);
2080                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2081                         if (enable)
2082                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2083                         else
2084                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2085                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2086                 }
2087                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2088         }
2089 }
2090
2091 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2092 {
2093         u32 reg;
2094
2095         if (!tg3_flag(tp, 5705_PLUS) ||
2096             (tg3_flag(tp, 5717_PLUS) &&
2097              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2098                 return;
2099
2100         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2101                 tg3_phy_fet_toggle_apd(tp, enable);
2102                 return;
2103         }
2104
2105         reg = MII_TG3_MISC_SHDW_WREN |
2106               MII_TG3_MISC_SHDW_SCR5_SEL |
2107               MII_TG3_MISC_SHDW_SCR5_LPED |
2108               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2109               MII_TG3_MISC_SHDW_SCR5_SDTL |
2110               MII_TG3_MISC_SHDW_SCR5_C125OE;
2111         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2112                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2113
2114         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2115
2117         reg = MII_TG3_MISC_SHDW_WREN |
2118               MII_TG3_MISC_SHDW_APD_SEL |
2119               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2120         if (enable)
2121                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2122
2123         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2124 }
2125
2126 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2127 {
2128         u32 phy;
2129
2130         if (!tg3_flag(tp, 5705_PLUS) ||
2131             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2132                 return;
2133
2134         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2135                 u32 ephy;
2136
2137                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2138                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2139
2140                         tg3_writephy(tp, MII_TG3_FET_TEST,
2141                                      ephy | MII_TG3_FET_SHADOW_EN);
2142                         if (!tg3_readphy(tp, reg, &phy)) {
2143                                 if (enable)
2144                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2145                                 else
2146                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2147                                 tg3_writephy(tp, reg, phy);
2148                         }
2149                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2150                 }
2151         } else {
2152                 int ret;
2153
2154                 ret = tg3_phy_auxctl_read(tp,
2155                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2156                 if (!ret) {
2157                         if (enable)
2158                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2159                         else
2160                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2161                         tg3_phy_auxctl_write(tp,
2162                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2163                 }
2164         }
2165 }
2166
2167 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2168 {
2169         int ret;
2170         u32 val;
2171
2172         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2173                 return;
2174
2175         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2176         if (!ret)
2177                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2178                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2179 }
2180
2181 static void tg3_phy_apply_otp(struct tg3 *tp)
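/* Distribute the factory OTP calibration word into the PHY DSP taps
 * (AGC target, HPF/LPF trims, VDAC, 10BT amplitude and R/RC offsets,
 * judging by the field names), bracketed by the SMDSP enable/disable
 * pair.
 */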
2182 {
2183         u32 otp, phy;
2184
2185         if (!tp->phy_otp)
2186                 return;
2187
2188         otp = tp->phy_otp;
2189
2190         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2191                 return;
2192
2193         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2194         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2195         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2196
2197         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2198               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2199         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2200
2201         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2202         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2203         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2204
2205         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2206         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2207
2208         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2209         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2210
2211         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2212               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2213         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2214
2215         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2216 }
2217
2218 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2219 {
2220         u32 val;
2221
2222         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2223                 return;
2224
2225         tp->setlpicnt = 0;
2226
2227         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2228             current_link_up == 1 &&
2229             tp->link_config.active_duplex == DUPLEX_FULL &&
2230             (tp->link_config.active_speed == SPEED_100 ||
2231              tp->link_config.active_speed == SPEED_1000)) {
2232                 u32 eeectl;
2233
2234                 if (tp->link_config.active_speed == SPEED_1000)
2235                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2236                 else
2237                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2238
2239                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2240
2241                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2242                                   TG3_CL45_D7_EEERES_STAT, &val);
2243
2244                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2245                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2246                         tp->setlpicnt = 2;
2247         }
2248
2249         if (!tp->setlpicnt) {
2250                 if (current_link_up == 1 &&
2251                     !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2252                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2253                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2254                 }
2255
2256                 val = tr32(TG3_CPMU_EEE_MODE);
2257                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2258         }
2259 }
2260
2261 static void tg3_phy_eee_enable(struct tg3 *tp)
2262 {
2263         u32 val;
2264
2265         if (tp->link_config.active_speed == SPEED_1000 &&
2266             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2267              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2268              tg3_flag(tp, 57765_CLASS)) &&
2269             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2270                 val = MII_TG3_DSP_TAP26_ALNOKO |
2271                       MII_TG3_DSP_TAP26_RMRXSTO;
2272                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2273                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2274         }
2275
2276         val = tr32(TG3_CPMU_EEE_MODE);
2277         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2278 }
2279
2280 static int tg3_wait_macro_done(struct tg3 *tp)
2281 {
2282         int limit = 100;
2283
2284         while (limit--) {
2285                 u32 tmp32;
2286
2287                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2288                         if ((tmp32 & 0x1000) == 0)
2289                                 break;
2290                 }
2291         }
2292         if (limit < 0)
2293                 return -EBUSY;
2294
2295         return 0;
2296 }
2297
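/* Write a known test pattern into each of the four DSP channels and
 * read it back through the macro interface; a mismatch or macro
 * timeout asks the caller (via *resetp) to reset the PHY and retry.
 */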
2298 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2299 {
2300         static const u32 test_pat[4][6] = {
2301         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2302         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2303         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2304         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2305         };
2306         int chan;
2307
2308         for (chan = 0; chan < 4; chan++) {
2309                 int i;
2310
2311                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2312                              (chan * 0x2000) | 0x0200);
2313                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2314
2315                 for (i = 0; i < 6; i++)
2316                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2317                                      test_pat[chan][i]);
2318
2319                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2320                 if (tg3_wait_macro_done(tp)) {
2321                         *resetp = 1;
2322                         return -EBUSY;
2323                 }
2324
2325                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2326                              (chan * 0x2000) | 0x0200);
2327                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2328                 if (tg3_wait_macro_done(tp)) {
2329                         *resetp = 1;
2330                         return -EBUSY;
2331                 }
2332
2333                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2334                 if (tg3_wait_macro_done(tp)) {
2335                         *resetp = 1;
2336                         return -EBUSY;
2337                 }
2338
2339                 for (i = 0; i < 6; i += 2) {
2340                         u32 low, high;
2341
2342                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2343                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2344                             tg3_wait_macro_done(tp)) {
2345                                 *resetp = 1;
2346                                 return -EBUSY;
2347                         }
2348                         low &= 0x7fff;
2349                         high &= 0x000f;
2350                         if (low != test_pat[chan][i] ||
2351                             high != test_pat[chan][i+1]) {
2352                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2353                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2354                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2355
2356                                 return -EBUSY;
2357                         }
2358                 }
2359         }
2360
2361         return 0;
2362 }
2363
2364 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2365 {
2366         int chan;
2367
2368         for (chan = 0; chan < 4; chan++) {
2369                 int i;
2370
2371                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2372                              (chan * 0x2000) | 0x0200);
2373                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2374                 for (i = 0; i < 6; i++)
2375                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2376                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2377                 if (tg3_wait_macro_done(tp))
2378                         return -EBUSY;
2379         }
2380
2381         return 0;
2382 }
2383
2384 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2385 {
2386         u32 reg32, phy9_orig;
2387         int retries, do_phy_reset, err;
2388
2389         retries = 10;
2390         do_phy_reset = 1;
2391         do {
2392                 if (do_phy_reset) {
2393                         err = tg3_bmcr_reset(tp);
2394                         if (err)
2395                                 return err;
2396                         do_phy_reset = 0;
2397                 }
2398
2399                 /* Disable transmitter and interrupt.  */
2400                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2401                         continue;
2402
2403                 reg32 |= 0x3000;
2404                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2405
2406                 /* Set full-duplex, 1000 Mbps. */
2407                 tg3_writephy(tp, MII_BMCR,
2408                              BMCR_FULLDPLX | BMCR_SPEED1000);
2409
2410                 /* Set to master mode.  */
2411                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2412                         continue;
2413
2414                 tg3_writephy(tp, MII_CTRL1000,
2415                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2416
2417                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2418                 if (err)
2419                         return err;
2420
2421                 /* Block the PHY control access.  */
2422                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2423
2424                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2425                 if (!err)
2426                         break;
2427         } while (--retries);
2428
2429         err = tg3_phy_reset_chanpat(tp);
2430         if (err)
2431                 return err;
2432
2433         tg3_phydsp_write(tp, 0x8005, 0x0000);
2434
2435         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2436         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2437
2438         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439
2440         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2441
2442         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2443                 reg32 &= ~0x3000;
2444                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2445         } else if (!err)
2446                 err = -EBUSY;
2447
2448         return err;
2449 }
2450
2451 /* Reset the tigon3 PHY and apply all chip-specific workarounds
2452  * needed to bring it back to a known-good state.
2453  */
2454 static int tg3_phy_reset(struct tg3 *tp)
2455 {
2456         u32 val, cpmuctrl;
2457         int err;
2458
2459         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2460                 val = tr32(GRC_MISC_CFG);
2461                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2462                 udelay(40);
2463         }
2464         err  = tg3_readphy(tp, MII_BMSR, &val);
2465         err |= tg3_readphy(tp, MII_BMSR, &val);
2466         if (err != 0)
2467                 return -EBUSY;
2468
2469         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2470                 netif_carrier_off(tp->dev);
2471                 tg3_link_report(tp);
2472         }
2473
2474         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2476             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2477                 err = tg3_phy_reset_5703_4_5(tp);
2478                 if (err)
2479                         return err;
2480                 goto out;
2481         }
2482
2483         cpmuctrl = 0;
2484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2485             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2486                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2487                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2488                         tw32(TG3_CPMU_CTRL,
2489                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2490         }
2491
2492         err = tg3_bmcr_reset(tp);
2493         if (err)
2494                 return err;
2495
2496         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2497                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2498                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2499
2500                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2501         }
2502
2503         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2504             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2505                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2506                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2507                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2508                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2509                         udelay(40);
2510                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2511                 }
2512         }
2513
2514         if (tg3_flag(tp, 5717_PLUS) &&
2515             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2516                 return 0;
2517
2518         tg3_phy_apply_otp(tp);
2519
2520         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2521                 tg3_phy_toggle_apd(tp, true);
2522         else
2523                 tg3_phy_toggle_apd(tp, false);
2524
2525 out:
2526         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2527             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2528                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2529                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2530                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2531         }
2532
2533         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2534                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2535                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2536         }
2537
2538         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2539                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2540                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2541                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2542                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2543                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2544                 }
2545         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2546                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2547                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2548                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2549                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2550                                 tg3_writephy(tp, MII_TG3_TEST1,
2551                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2552                         } else
2553                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2554
2555                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2556                 }
2557         }
2558
2559         /* Set Extended packet length bit (bit 14) on all chips that
2560          * support jumbo frames. */
2561         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2562                 /* Cannot do read-modify-write on 5401 */
2563                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2564         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2565                 /* Set bit 14 with read-modify-write to preserve other bits */
2566                 err = tg3_phy_auxctl_read(tp,
2567                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2568                 if (!err)
2569                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2570                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2571         }
2572
2573         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2574          * jumbo frame transmission.
2575          */
2576         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2577                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2578                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2579                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2580         }
2581
2582         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2583                 /* adjust output voltage */
2584                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2585         }
2586
2587         tg3_phy_toggle_automdix(tp, 1);
2588         tg3_phy_set_wirespeed(tp);
2589         return 0;
2590 }
2591
2592 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2593 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2594 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2595                                           TG3_GPIO_MSG_NEED_VAUX)
2596 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2597         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2598          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2599          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2600          (TG3_GPIO_MSG_DRVR_PRES << 12))
2601
2602 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2603         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2604          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2605          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2606          (TG3_GPIO_MSG_NEED_VAUX << 12))
2607
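/* Each of the four PCI functions owns a 4-bit slice of the shared
 * GPIO message word (DRVR_PRES and NEED_VAUX per slice, as the masks
 * above encode).  tg3_set_function_status() swaps in this function's
 * slice and returns the updated word so the caller can see what the
 * other functions still require.
 */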
2608 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2609 {
2610         u32 status, shift;
2611
2612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2613             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2614                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2615         else
2616                 status = tr32(TG3_CPMU_DRV_STATUS);
2617
2618         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2619         status &= ~(TG3_GPIO_MSG_MASK << shift);
2620         status |= (newstat << shift);
2621
2622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2623             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2624                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2625         else
2626                 tw32(TG3_CPMU_DRV_STATUS, status);
2627
2628         return status >> TG3_APE_GPIO_MSG_SHIFT;
2629 }
2630
2631 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2632 {
2633         if (!tg3_flag(tp, IS_NIC))
2634                 return 0;
2635
2636         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2637             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2638             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2639                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2640                         return -EIO;
2641
2642                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2643
2644                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2645                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2646
2647                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2648         } else {
2649                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2650                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2651         }
2652
2653         return 0;
2654 }
2655
2656 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2657 {
2658         u32 grc_local_ctrl;
2659
2660         if (!tg3_flag(tp, IS_NIC) ||
2661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2662             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2663                 return;
2664
2665         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2666
2667         tw32_wait_f(GRC_LOCAL_CTRL,
2668                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2669                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2670
2671         tw32_wait_f(GRC_LOCAL_CTRL,
2672                     grc_local_ctrl,
2673                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2674
2675         tw32_wait_f(GRC_LOCAL_CTRL,
2676                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2677                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2678 }
2679
2680 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2681 {
2682         if (!tg3_flag(tp, IS_NIC))
2683                 return;
2684
2685         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2687                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2688                             (GRC_LCLCTRL_GPIO_OE0 |
2689                              GRC_LCLCTRL_GPIO_OE1 |
2690                              GRC_LCLCTRL_GPIO_OE2 |
2691                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2692                              GRC_LCLCTRL_GPIO_OUTPUT1),
2693                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2694         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2695                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2696                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2697                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2698                                      GRC_LCLCTRL_GPIO_OE1 |
2699                                      GRC_LCLCTRL_GPIO_OE2 |
2700                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2701                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2702                                      tp->grc_local_ctrl;
2703                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2704                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2705
2706                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2707                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2708                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2709
2710                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2711                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2712                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2713         } else {
2714                 u32 no_gpio2;
2715                 u32 grc_local_ctrl = 0;
2716
2717                 /* Workaround to keep the part from drawing too much current. */
2718                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2719                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2720                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2721                                     grc_local_ctrl,
2722                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2723                 }
2724
2725                 /* On 5753 and variants, GPIO2 cannot be used. */
2726                 no_gpio2 = tp->nic_sram_data_cfg &
2727                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2728
2729                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2730                                   GRC_LCLCTRL_GPIO_OE1 |
2731                                   GRC_LCLCTRL_GPIO_OE2 |
2732                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2733                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2734                 if (no_gpio2) {
2735                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2736                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2737                 }
2738                 tw32_wait_f(GRC_LOCAL_CTRL,
2739                             tp->grc_local_ctrl | grc_local_ctrl,
2740                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2741
2742                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2743
2744                 tw32_wait_f(GRC_LOCAL_CTRL,
2745                             tp->grc_local_ctrl | grc_local_ctrl,
2746                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2747
2748                 if (!no_gpio2) {
2749                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2750                         tw32_wait_f(GRC_LOCAL_CTRL,
2751                                     tp->grc_local_ctrl | grc_local_ctrl,
2752                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2753                 }
2754         }
2755 }
2756
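/* Publish this function's VAUX requirement under the APE GPIO lock.
 * The power source is switched only when no other function's driver
 * is present; otherwise that driver is left in charge of it.
 */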
2757 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2758 {
2759         u32 msg = 0;
2760
2761         /* Serialize power state transitions */
2762         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2763                 return;
2764
2765         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2766                 msg = TG3_GPIO_MSG_NEED_VAUX;
2767
2768         msg = tg3_set_function_status(tp, msg);
2769
2770         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2771                 goto done;
2772
2773         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2774                 tg3_pwrsrc_switch_to_vaux(tp);
2775         else
2776                 tg3_pwrsrc_die_with_vmain(tp);
2777
2778 done:
2779         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2780 }
2781
2782 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2783 {
2784         bool need_vaux = false;
2785
2786         /* The GPIOs serve a different purpose on the 57765 class, so leave them alone. */
2787         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2788                 return;
2789
2790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2792             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2793                 tg3_frob_aux_power_5717(tp, include_wol ?
2794                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2795                 return;
2796         }
2797
2798         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2799                 struct net_device *dev_peer;
2800
2801                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2802
2803                 /* remove_one() may have been run on the peer. */
2804                 if (dev_peer) {
2805                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2806
2807                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2808                                 return;
2809
2810                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2811                             tg3_flag(tp_peer, ENABLE_ASF))
2812                                 need_vaux = true;
2813                 }
2814         }
2815
2816         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2817             tg3_flag(tp, ENABLE_ASF))
2818                 need_vaux = true;
2819
2820         if (need_vaux)
2821                 tg3_pwrsrc_switch_to_vaux(tp);
2822         else
2823                 tg3_pwrsrc_die_with_vmain(tp);
2824 }
2825
2826 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2827 {
2828         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2829                 return 1;
2830         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2831                 if (speed != SPEED_10)
2832                         return 1;
2833         } else if (speed == SPEED_10)
2834                 return 1;
2835
2836         return 0;
2837 }
2838
2839 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2840 {
2841         u32 val;
2842
2843         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2844                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2845                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2846                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2847
2848                         sg_dig_ctrl |=
2849                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2850                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2851                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2852                 }
2853                 return;
2854         }
2855
2856         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2857                 tg3_bmcr_reset(tp);
2858                 val = tr32(GRC_MISC_CFG);
2859                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2860                 udelay(40);
2861                 return;
2862         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2863                 u32 phytest;
2864                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2865                         u32 phy;
2866
2867                         tg3_writephy(tp, MII_ADVERTISE, 0);
2868                         tg3_writephy(tp, MII_BMCR,
2869                                      BMCR_ANENABLE | BMCR_ANRESTART);
2870
2871                         tg3_writephy(tp, MII_TG3_FET_TEST,
2872                                      phytest | MII_TG3_FET_SHADOW_EN);
2873                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2874                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2875                                 tg3_writephy(tp,
2876                                              MII_TG3_FET_SHDW_AUXMODE4,
2877                                              phy);
2878                         }
2879                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2880                 }
2881                 return;
2882         } else if (do_low_power) {
2883                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2884                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2885
2886                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2887                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2888                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2889                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2890         }
2891
2892         /* The PHY must not be powered down on some chips because of
2893          * hardware errata; leave it powered in those cases.
2894          */
2895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2896             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2897             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2898              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2899             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2900              !tp->pci_fn))
2901                 return;
2902
2903         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2904             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2905                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2906                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2907                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2908                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2909         }
2910
2911         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2912 }
2913
2914 /* tp->lock is held. */
2915 static int tg3_nvram_lock(struct tg3 *tp)
2916 {
2917         if (tg3_flag(tp, NVRAM)) {
2918                 int i;
2919
2920                 if (tp->nvram_lock_cnt == 0) {
2921                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2922                         for (i = 0; i < 8000; i++) {
2923                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2924                                         break;
2925                                 udelay(20);
2926                         }
2927                         if (i == 8000) {
2928                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2929                                 return -ENODEV;
2930                         }
2931                 }
2932                 tp->nvram_lock_cnt++;
2933         }
2934         return 0;
2935 }
2936
2937 /* tp->lock is held. */
2938 static void tg3_nvram_unlock(struct tg3 *tp)
2939 {
2940         if (tg3_flag(tp, NVRAM)) {
2941                 if (tp->nvram_lock_cnt > 0)
2942                         tp->nvram_lock_cnt--;
2943                 if (tp->nvram_lock_cnt == 0)
2944                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2945         }
2946 }
2947
2948 /* tp->lock is held. */
2949 static void tg3_enable_nvram_access(struct tg3 *tp)
2950 {
2951         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2952                 u32 nvaccess = tr32(NVRAM_ACCESS);
2953
2954                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2955         }
2956 }
2957
2958 /* tp->lock is held. */
2959 static void tg3_disable_nvram_access(struct tg3 *tp)
2960 {
2961         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2962                 u32 nvaccess = tr32(NVRAM_ACCESS);
2963
2964                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2965         }
2966 }
2967
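/* Illustrative pairing (a sketch, not a caller in this file): the NVRAM
 * accessors below use these helpers while holding tp->lock:
 *
 *	if (tg3_nvram_lock(tp) == 0) {
 *		tg3_enable_nvram_access(tp);
 *		... issue NVRAM commands ...
 *		tg3_disable_nvram_access(tp);
 *		tg3_nvram_unlock(tp);
 *	}
 */
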
2968 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2969                                         u32 offset, u32 *val)
2970 {
2971         u32 tmp;
2972         int i;
2973
2974         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2975                 return -EINVAL;
2976
2977         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2978                                         EEPROM_ADDR_DEVID_MASK |
2979                                         EEPROM_ADDR_READ);
2980         tw32(GRC_EEPROM_ADDR,
2981              tmp |
2982              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2983              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2984               EEPROM_ADDR_ADDR_MASK) |
2985              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2986
2987         for (i = 0; i < 1000; i++) {
2988                 tmp = tr32(GRC_EEPROM_ADDR);
2989
2990                 if (tmp & EEPROM_ADDR_COMPLETE)
2991                         break;
2992                 msleep(1);
2993         }
2994         if (!(tmp & EEPROM_ADDR_COMPLETE))
2995                 return -EBUSY;
2996
2997         tmp = tr32(GRC_EEPROM_DATA);
2998
2999         /*
3000          * The data will always be opposite the native endian
3001          * format.  Perform a blind byteswap to compensate.
3002          */
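        /* e.g. swab32(0x12345678) == 0x78563412 */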
3003         *val = swab32(tmp);
3004
3005         return 0;
3006 }
3007
3008 #define NVRAM_CMD_TIMEOUT 10000
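/* 10000 polls of 10 usec each: roughly 100 ms worst case per command */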
3009
3010 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3011 {
3012         int i;
3013
3014         tw32(NVRAM_CMD, nvram_cmd);
3015         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3016                 udelay(10);
3017                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3018                         udelay(10);
3019                         break;
3020                 }
3021         }
3022
3023         if (i == NVRAM_CMD_TIMEOUT)
3024                 return -EBUSY;
3025
3026         return 0;
3027 }
3028
3029 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3030 {
3031         if (tg3_flag(tp, NVRAM) &&
3032             tg3_flag(tp, NVRAM_BUFFERED) &&
3033             tg3_flag(tp, FLASH) &&
3034             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3035             (tp->nvram_jedecnum == JEDEC_ATMEL))
3036
3037                 addr = ((addr / tp->nvram_pagesize) <<
3038                         ATMEL_AT45DB0X1B_PAGE_POS) +
3039                        (addr % tp->nvram_pagesize);
3040
3041         return addr;
3042 }
3043
3044 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3045 {
3046         if (tg3_flag(tp, NVRAM) &&
3047             tg3_flag(tp, NVRAM_BUFFERED) &&
3048             tg3_flag(tp, FLASH) &&
3049             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3050             (tp->nvram_jedecnum == JEDEC_ATMEL))
3051
3052                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3053                         tp->nvram_pagesize) +
3054                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3055
3056         return addr;
3057 }
3058
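/* Worked example (assuming the AT45DB0x1B's 264-byte pages and
 * ATMEL_AT45DB0X1B_PAGE_POS == 9): logical address 8192 falls in page
 * 8192 / 264 = 31 at offset 8192 % 264 = 8, so the physical address is
 * (31 << 9) + 8 = 0x3e08.  tg3_nvram_logical_addr() applies the exact
 * inverse mapping.
 */
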
3059 /* NOTE: Data read in from NVRAM is byteswapped according to
3060  * the byteswapping settings for all other register accesses.
3061  * tg3 devices are BE devices, so on a BE machine, the data
3062  * returned will be exactly as it is seen in NVRAM.  On a LE
3063  * machine, the 32-bit value will be byteswapped.
3064  */
3065 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3066 {
3067         int ret;
3068
3069         if (!tg3_flag(tp, NVRAM))
3070                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3071
3072         offset = tg3_nvram_phys_addr(tp, offset);
3073
3074         if (offset > NVRAM_ADDR_MSK)
3075                 return -EINVAL;
3076
3077         ret = tg3_nvram_lock(tp);
3078         if (ret)
3079                 return ret;
3080
3081         tg3_enable_nvram_access(tp);
3082
3083         tw32(NVRAM_ADDR, offset);
3084         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3085                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3086
3087         if (ret == 0)
3088                 *val = tr32(NVRAM_RDDATA);
3089
3090         tg3_disable_nvram_access(tp);
3091
3092         tg3_nvram_unlock(tp);
3093
3094         return ret;
3095 }
3096
3097 /* Ensures NVRAM data is in bytestream format. */
3098 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3099 {
3100         u32 v;
3101         int res = tg3_nvram_read(tp, offset, &v);
3102         if (!res)
3103                 *val = cpu_to_be32(v);
3104         return res;
3105 }
3106
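/* Minimal sketch (hypothetical helper, not part of the driver): read a
 * dword-aligned NVRAM range into a byte buffer in bytestream order via
 * tg3_nvram_read_be32() above.  Caller holds tp->lock.
 */
static int __maybe_unused tg3_nvram_read_range(struct tg3 *tp, u32 offset,
					       u32 len, u8 *buf)
{
	u32 i;

	for (i = 0; i < len; i += 4) {
		__be32 v;
		int err = tg3_nvram_read_be32(tp, offset + i, &v);

		if (err)
			return err;
		memcpy(buf + i, &v, 4);
	}
	return 0;
}
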
3107 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3108                                     u32 offset, u32 len, u8 *buf)
3109 {
3110         int i, j, rc = 0;
3111         u32 val;
3112
3113         for (i = 0; i < len; i += 4) {
3114                 u32 addr;
3115                 __be32 data;
3116
3117                 addr = offset + i;
3118
3119                 memcpy(&data, buf + i, 4);
3120
3121                 /*
3122                  * The SEEPROM interface expects the data to always be opposite
3123                  * the native endian format.  We accomplish this by reversing
3124                  * all the operations that would have been performed on the
3125                  * data from a call to tg3_nvram_read_be32().
3126                  */
3127                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3128
3129                 val = tr32(GRC_EEPROM_ADDR);
3130                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3131
3132                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3133                         EEPROM_ADDR_READ);
3134                 tw32(GRC_EEPROM_ADDR, val |
3135                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3136                         (addr & EEPROM_ADDR_ADDR_MASK) |
3137                         EEPROM_ADDR_START |
3138                         EEPROM_ADDR_WRITE);
3139
3140                 for (j = 0; j < 1000; j++) {
3141                         val = tr32(GRC_EEPROM_ADDR);
3142
3143                         if (val & EEPROM_ADDR_COMPLETE)
3144                                 break;
3145                         msleep(1);
3146                 }
3147                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3148                         rc = -EBUSY;
3149                         break;
3150                 }
3151         }
3152
3153         return rc;
3154 }
3155
3156 /* offset and length are dword aligned */
3157 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3158                 u8 *buf)
3159 {
3160         int ret = 0;
3161         u32 pagesize = tp->nvram_pagesize;
3162         u32 pagemask = pagesize - 1;
3163         u32 nvram_cmd;
3164         u8 *tmp;
3165
3166         tmp = kmalloc(pagesize, GFP_KERNEL);
3167         if (tmp == NULL)
3168                 return -ENOMEM;
3169
3170         while (len) {
3171                 int j;
3172                 u32 phy_addr, page_off, size;
3173
3174                 phy_addr = offset & ~pagemask;
3175
3176                 for (j = 0; j < pagesize; j += 4) {
3177                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3178                                                   (__be32 *) (tmp + j));
3179                         if (ret)
3180                                 break;
3181                 }
3182                 if (ret)
3183                         break;
3184
3185                 page_off = offset & pagemask;
3186                 size = pagesize;
3187                 if (len < size)
3188                         size = len;
3189
3190                 len -= size;
3191
3192                 memcpy(tmp + page_off, buf, size);
3193                 buf += size;    /* advance past the chunk just staged */
3194                 offset = offset + (pagesize - page_off);
3195
3196                 tg3_enable_nvram_access(tp);
3197
3198                 /*
3199                  * Before we can erase the flash page, we need
3200                  * to issue a special "write enable" command.
3201                  */
3202                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3203
3204                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3205                         break;
3206
3207                 /* Erase the target page */
3208                 tw32(NVRAM_ADDR, phy_addr);
3209
3210                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3211                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3212
3213                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3214                         break;
3215
3216                 /* Issue another write enable to start the write. */
3217                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3218
3219                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3220                         break;
3221
3222                 for (j = 0; j < pagesize; j += 4) {
3223                         __be32 data;
3224
3225                         data = *((__be32 *) (tmp + j));
3226
3227                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3228
3229                         tw32(NVRAM_ADDR, phy_addr + j);
3230
3231                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3232                                 NVRAM_CMD_WR;
3233
3234                         if (j == 0)
3235                                 nvram_cmd |= NVRAM_CMD_FIRST;
3236                         else if (j == (pagesize - 4))
3237                                 nvram_cmd |= NVRAM_CMD_LAST;
3238
3239                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3240                         if (ret)
3241                                 break;
3242                 }
3243                 if (ret)
3244                         break;
3245         }
3246
3247         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3248         tg3_nvram_exec_cmd(tp, nvram_cmd);
3249
3250         kfree(tmp);
3251
3252         return ret;
3253 }
3254
3255 /* offset and length are dword aligned */
3256 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3257                 u8 *buf)
3258 {
3259         int i, ret = 0;
3260
3261         for (i = 0; i < len; i += 4, offset += 4) {
3262                 u32 page_off, phy_addr, nvram_cmd;
3263                 __be32 data;
3264
3265                 memcpy(&data, buf + i, 4);
3266                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3267
3268                 page_off = offset % tp->nvram_pagesize;
3269
3270                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3271
3272                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3273
3274                 if (page_off == 0 || i == 0)
3275                         nvram_cmd |= NVRAM_CMD_FIRST;
3276                 if (page_off == (tp->nvram_pagesize - 4))
3277                         nvram_cmd |= NVRAM_CMD_LAST;
3278
3279                 if (i == (len - 4))
3280                         nvram_cmd |= NVRAM_CMD_LAST;
3281
3282                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3283                     !tg3_flag(tp, FLASH) ||
3284                     !tg3_flag(tp, 57765_PLUS))
3285                         tw32(NVRAM_ADDR, phy_addr);
3286
3287                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3288                     !tg3_flag(tp, 5755_PLUS) &&
3289                     (tp->nvram_jedecnum == JEDEC_ST) &&
3290                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3291                         u32 cmd;
3292
3293                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3294                         ret = tg3_nvram_exec_cmd(tp, cmd);
3295                         if (ret)
3296                                 break;
3297                 }
3298                 if (!tg3_flag(tp, FLASH)) {
3299                         /* We always do complete word writes to eeprom. */
3300                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3301                 }
3302
3303                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3304                 if (ret)
3305                         break;
3306         }
3307         return ret;
3308 }
3309
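/* Example (illustrative, assuming a 264-byte Atmel page): a buffered
 * write starting at logical offset 264 begins a fresh page
 * (page_off == 0), so its first dword carries NVRAM_CMD_FIRST; the
 * dword at page offset 260 (== pagesize - 4) carries NVRAM_CMD_LAST,
 * closing the page programming cycle.
 */
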
3310 /* offset and length are dword aligned */
3311 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3312 {
3313         int ret;
3314
3315         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3316                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3317                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3318                 udelay(40);
3319         }
3320
3321         if (!tg3_flag(tp, NVRAM)) {
3322                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3323         } else {
3324                 u32 grc_mode;
3325
3326                 ret = tg3_nvram_lock(tp);
3327                 if (ret)
3328                         return ret;
3329
3330                 tg3_enable_nvram_access(tp);
3331                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3332                         tw32(NVRAM_WRITE1, 0x406);
3333
3334                 grc_mode = tr32(GRC_MODE);
3335                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3336
3337                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3338                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3339                                 buf);
3340                 } else {
3341                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3342                                 buf);
3343                 }
3344
3345                 grc_mode = tr32(GRC_MODE);
3346                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3347
3348                 tg3_disable_nvram_access(tp);
3349                 tg3_nvram_unlock(tp);
3350         }
3351
3352         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3353                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3354                 udelay(40);
3355         }
3356
3357         return ret;
3358 }
3359
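/* Usage sketch (illustrative): callers such as the ethtool set_eeprom
 * path hand dword-aligned buffers straight to tg3_nvram_write_block(),
 * which handles locking, access enable and the write-protect GPIO:
 *
 *	u8 data[4] = { 0xde, 0xad, 0xbe, 0xef };
 *	int err = tg3_nvram_write_block(tp, 0x100, sizeof(data), data);
 */
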
3360 #define RX_CPU_SCRATCH_BASE     0x30000
3361 #define RX_CPU_SCRATCH_SIZE     0x04000
3362 #define TX_CPU_SCRATCH_BASE     0x34000
3363 #define TX_CPU_SCRATCH_SIZE     0x04000
3364
3365 /* tp->lock is held. */
3366 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3367 {
3368         int i;
3369
3370         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3371
3372         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3373                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3374
3375                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3376                 return 0;
3377         }
3378         if (offset == RX_CPU_BASE) {
3379                 for (i = 0; i < 10000; i++) {
3380                         tw32(offset + CPU_STATE, 0xffffffff);
3381                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3382                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3383                                 break;
3384                 }
3385
3386                 tw32(offset + CPU_STATE, 0xffffffff);
3387                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3388                 udelay(10);
3389         } else {
3390                 for (i = 0; i < 10000; i++) {
3391                         tw32(offset + CPU_STATE, 0xffffffff);
3392                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3393                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3394                                 break;
3395                 }
3396         }
3397
3398         if (i >= 10000) {
3399                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3400                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3401                 return -ENODEV;
3402         }
3403
3404         /* Clear firmware's nvram arbitration. */
3405         if (tg3_flag(tp, NVRAM))
3406                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3407         return 0;
3408 }
3409
3410 struct fw_info {
3411         unsigned int fw_base;   /* load/start address of the image */
3412         unsigned int fw_len;    /* image length in bytes */
3413         const __be32 *fw_data;  /* big-endian firmware words */
3414 };
3415
3416 /* tp->lock is held. */
3417 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3418                                  u32 cpu_scratch_base, int cpu_scratch_size,
3419                                  struct fw_info *info)
3420 {
3421         int err, lock_err, i;
3422         void (*write_op)(struct tg3 *, u32, u32);
3423
3424         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3425                 netdev_err(tp->dev,
3426                            "%s: Trying to load TX cpu firmware on a 5705-class device, which has no TX cpu\n",
3427                            __func__);
3428                 return -EINVAL;
3429         }
3430
3431         if (tg3_flag(tp, 5705_PLUS))
3432                 write_op = tg3_write_mem;
3433         else
3434                 write_op = tg3_write_indirect_reg32;
3435
3436         /* It is possible that bootcode is still loading at this point.
3437          * Take the nvram lock before halting the cpu.
3438          */
3439         lock_err = tg3_nvram_lock(tp);
3440         err = tg3_halt_cpu(tp, cpu_base);
3441         if (!lock_err)
3442                 tg3_nvram_unlock(tp);
3443         if (err)
3444                 goto out;
3445
3446         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3447                 write_op(tp, cpu_scratch_base + i, 0);
3448         tw32(cpu_base + CPU_STATE, 0xffffffff);
3449         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3450         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3451                 write_op(tp, (cpu_scratch_base +
3452                               (info->fw_base & 0xffff) +
3453                               (i * sizeof(u32))),
3454                               be32_to_cpu(info->fw_data[i]));
3455
3456         err = 0;
3457
3458 out:
3459         return err;
3460 }
3461
3462 /* tp->lock is held. */
3463 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3464 {
3465         struct fw_info info;
3466         const __be32 *fw_data;
3467         int err, i;
3468
3469         fw_data = (void *)tp->fw->data;
3470
3471         /* The firmware blob starts with version numbers, followed by
3472          * the start address and length.  We use the complete length:
3473          * length = end_address_of_bss - start_address_of_text.  The
3474          * remainder is the image, loaded contiguously from the start
3475          * address. */
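        /* Inferred header layout (an assumption drawn from the parsing
         * below, not a documented format):
         *
         *      fw_data[0]  version
         *      fw_data[1]  fw_base (load/start address)
         *      fw_data[2]  length field (unused; tp->fw->size is used)
         *      fw_data[3]  first word of the image proper
         */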
3476
3477         info.fw_base = be32_to_cpu(fw_data[1]);
3478         info.fw_len = tp->fw->size - 12;
3479         info.fw_data = &fw_data[3];
3480
3481         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3482                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3483                                     &info);
3484         if (err)
3485                 return err;
3486
3487         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3488                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3489                                     &info);
3490         if (err)
3491                 return err;
3492
3493         /* Now start up only the RX cpu. */
3494         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3495         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3496
3497         for (i = 0; i < 5; i++) {
3498                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3499                         break;
3500                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3501                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3502                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3503                 udelay(1000);
3504         }
3505         if (i >= 5) {
3506                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3507                            "should be %08x\n", __func__,
3508                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3509                 return -ENODEV;
3510         }
3511         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3512         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3513
3514         return 0;
3515 }
3516
3517 /* tp->lock is held. */
3518 static int tg3_load_tso_firmware(struct tg3 *tp)
3519 {
3520         struct fw_info info;
3521         const __be32 *fw_data;
3522         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3523         int err, i;
3524
3525         if (tg3_flag(tp, HW_TSO_1) ||
3526             tg3_flag(tp, HW_TSO_2) ||
3527             tg3_flag(tp, HW_TSO_3))
3528                 return 0;
3529
3530         fw_data = (void *)tp->fw->data;
3531
3532         /* The firmware blob starts with version numbers, followed by
3533          * the start address and length.  We use the complete length:
3534          * length = end_address_of_bss - start_address_of_text.  The
3535          * remainder is the image, loaded contiguously from the start
3536          * address. */
3537
3538         info.fw_base = be32_to_cpu(fw_data[1]);
3539         cpu_scratch_size = tp->fw_len;
3540         info.fw_len = tp->fw->size - 12;
3541         info.fw_data = &fw_data[3];
3542
3543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3544                 cpu_base = RX_CPU_BASE;
3545                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3546         } else {
3547                 cpu_base = TX_CPU_BASE;
3548                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3549                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3550         }
3551
3552         err = tg3_load_firmware_cpu(tp, cpu_base,
3553                                     cpu_scratch_base, cpu_scratch_size,
3554                                     &info);
3555         if (err)
3556                 return err;
3557
3558         /* Now start up the cpu. */
3559         tw32(cpu_base + CPU_STATE, 0xffffffff);
3560         tw32_f(cpu_base + CPU_PC, info.fw_base);
3561
3562         for (i = 0; i < 5; i++) {
3563                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3564                         break;
3565                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3566                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3567                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3568                 udelay(1000);
3569         }
3570         if (i >= 5) {
3571                 netdev_err(tp->dev,
3572                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3573                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3574                 return -ENODEV;
3575         }
3576         tw32(cpu_base + CPU_STATE, 0xffffffff);
3577         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3578         return 0;
3579 }
3580
3581
3582 /* tp->lock is held. */
3583 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3584 {
3585         u32 addr_high, addr_low;
3586         int i;
3587
3588         addr_high = ((tp->dev->dev_addr[0] << 8) |
3589                      tp->dev->dev_addr[1]);
3590         addr_low = ((tp->dev->dev_addr[2] << 24) |
3591                     (tp->dev->dev_addr[3] << 16) |
3592                     (tp->dev->dev_addr[4] <<  8) |
3593                     (tp->dev->dev_addr[5] <<  0));
3594         for (i = 0; i < 4; i++) {
3595                 if (i == 1 && skip_mac_1)
3596                         continue;
3597                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3598                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3599         }
3600
3601         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3602             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3603                 for (i = 0; i < 12; i++) {
3604                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3605                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3606                 }
3607         }
3608
3609         addr_high = (tp->dev->dev_addr[0] +
3610                      tp->dev->dev_addr[1] +
3611                      tp->dev->dev_addr[2] +
3612                      tp->dev->dev_addr[3] +
3613                      tp->dev->dev_addr[4] +
3614                      tp->dev->dev_addr[5]) &
3615                 TX_BACKOFF_SEED_MASK;
3616         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3617 }
3618
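/* Worked example (illustrative): for MAC address 00:10:18:aa:bb:cc the
 * loop above programs addr_high = 0x00000010 and addr_low = 0x18aabbcc,
 * and the TX backoff seed becomes
 * (0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc) & TX_BACKOFF_SEED_MASK.
 */
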
3619 static void tg3_enable_register_access(struct tg3 *tp)
3620 {
3621         /*
3622          * Make sure register accesses (indirect or otherwise) will function
3623          * correctly.
3624          */
3625         pci_write_config_dword(tp->pdev,
3626                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3627 }
3628
3629 static int tg3_power_up(struct tg3 *tp)
3630 {
3631         int err;
3632
3633         tg3_enable_register_access(tp);
3634
3635         err = pci_set_power_state(tp->pdev, PCI_D0);
3636         if (!err) {
3637                 /* Switch out of Vaux if it is a NIC */
3638                 tg3_pwrsrc_switch_to_vmain(tp);
3639         } else {
3640                 netdev_err(tp->dev, "Transition to D0 failed\n");
3641         }
3642
3643         return err;
3644 }
3645
3646 static int tg3_setup_phy(struct tg3 *, int);
3647
3648 static int tg3_power_down_prepare(struct tg3 *tp)
3649 {
3650         u32 misc_host_ctrl;
3651         bool device_should_wake, do_low_power;
3652
3653         tg3_enable_register_access(tp);
3654
3655         /* Restore the CLKREQ setting. */
3656         if (tg3_flag(tp, CLKREQ_BUG)) {
3657                 u16 lnkctl;
3658
3659                 pci_read_config_word(tp->pdev,
3660                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3661                                      &lnkctl);
3662                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3663                 pci_write_config_word(tp->pdev,
3664                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3665                                       lnkctl);
3666         }
3667
3668         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3669         tw32(TG3PCI_MISC_HOST_CTRL,
3670              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3671
3672         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3673                              tg3_flag(tp, WOL_ENABLE);
3674
3675         if (tg3_flag(tp, USE_PHYLIB)) {
3676                 do_low_power = false;
3677                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3678                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3679                         struct phy_device *phydev;
3680                         u32 phyid, advertising;
3681
3682                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3683
3684                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3685
3686                         tp->link_config.speed = phydev->speed;
3687                         tp->link_config.duplex = phydev->duplex;
3688                         tp->link_config.autoneg = phydev->autoneg;
3689                         tp->link_config.advertising = phydev->advertising;
3690
3691                         advertising = ADVERTISED_TP |
3692                                       ADVERTISED_Pause |
3693                                       ADVERTISED_Autoneg |
3694                                       ADVERTISED_10baseT_Half;
3695
3696                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3697                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3698                                         advertising |=
3699                                                 ADVERTISED_100baseT_Half |
3700                                                 ADVERTISED_100baseT_Full |
3701                                                 ADVERTISED_10baseT_Full;
3702                                 else
3703                                         advertising |= ADVERTISED_10baseT_Full;
3704                         }
3705
3706                         phydev->advertising = advertising;
3707
3708                         phy_start_aneg(phydev);
3709
3710                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3711                         if (phyid != PHY_ID_BCMAC131) {
3712                                 phyid &= PHY_BCM_OUI_MASK;
3713                                 if (phyid == PHY_BCM_OUI_1 ||
3714                                     phyid == PHY_BCM_OUI_2 ||
3715                                     phyid == PHY_BCM_OUI_3)
3716                                         do_low_power = true;
3717                         }
3718                 }
3719         } else {
3720                 do_low_power = true;
3721
3722                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3723                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3724
3725                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3726                         tg3_setup_phy(tp, 0);
3727         }
3728
3729         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3730                 u32 val;
3731
3732                 val = tr32(GRC_VCPU_EXT_CTRL);
3733                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3734         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3735                 int i;
3736                 u32 val;
3737
3738                 for (i = 0; i < 200; i++) {
3739                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3740                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3741                                 break;
3742                         msleep(1);
3743                 }
3744         }
3745         if (tg3_flag(tp, WOL_CAP))
3746                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3747                                                      WOL_DRV_STATE_SHUTDOWN |
3748                                                      WOL_DRV_WOL |
3749                                                      WOL_SET_MAGIC_PKT);
3750
3751         if (device_should_wake) {
3752                 u32 mac_mode;
3753
3754                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3755                         if (do_low_power &&
3756                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3757                                 tg3_phy_auxctl_write(tp,
3758                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3759                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3760                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3761                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3762                                 udelay(40);
3763                         }
3764
3765                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3766                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3767                         else
3768                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3769
3770                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3771                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3772                             ASIC_REV_5700) {
3773                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3774                                              SPEED_100 : SPEED_10;
3775                                 if (tg3_5700_link_polarity(tp, speed))
3776                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3777                                 else
3778                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3779                         }
3780                 } else {
3781                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3782                 }
3783
3784                 if (!tg3_flag(tp, 5750_PLUS))
3785                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3786
3787                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3788                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3789                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3790                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3791
3792                 if (tg3_flag(tp, ENABLE_APE))
3793                         mac_mode |= MAC_MODE_APE_TX_EN |
3794                                     MAC_MODE_APE_RX_EN |
3795                                     MAC_MODE_TDE_ENABLE;
3796
3797                 tw32_f(MAC_MODE, mac_mode);
3798                 udelay(100);
3799
3800                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3801                 udelay(10);
3802         }
3803
3804         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3805             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3806              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3807                 u32 base_val;
3808
3809                 base_val = tp->pci_clock_ctrl;
3810                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3811                              CLOCK_CTRL_TXCLK_DISABLE);
3812
3813                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3814                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3815         } else if (tg3_flag(tp, 5780_CLASS) ||
3816                    tg3_flag(tp, CPMU_PRESENT) ||
3817                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3818                 /* do nothing */
3819         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3820                 u32 newbits1, newbits2;
3821
3822                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3823                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3824                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3825                                     CLOCK_CTRL_TXCLK_DISABLE |
3826                                     CLOCK_CTRL_ALTCLK);
3827                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3828                 } else if (tg3_flag(tp, 5705_PLUS)) {
3829                         newbits1 = CLOCK_CTRL_625_CORE;
3830                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3831                 } else {
3832                         newbits1 = CLOCK_CTRL_ALTCLK;
3833                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3834                 }
3835
3836                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3837                             40);
3838
3839                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3840                             40);
3841
3842                 if (!tg3_flag(tp, 5705_PLUS)) {
3843                         u32 newbits3;
3844
3845                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3846                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3847                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3848                                             CLOCK_CTRL_TXCLK_DISABLE |
3849                                             CLOCK_CTRL_44MHZ_CORE);
3850                         } else {
3851                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3852                         }
3853
3854                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3855                                     tp->pci_clock_ctrl | newbits3, 40);
3856                 }
3857         }
3858
3859         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3860                 tg3_power_down_phy(tp, do_low_power);
3861
3862         tg3_frob_aux_power(tp, true);
3863
3864         /* Workaround for unstable PLL clock */
3865         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3866             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3867                 u32 val = tr32(0x7d00);
3868
3869                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3870                 tw32(0x7d00, val);
3871                 if (!tg3_flag(tp, ENABLE_ASF)) {
3872                         int err;
3873
3874                         err = tg3_nvram_lock(tp);
3875                         tg3_halt_cpu(tp, RX_CPU_BASE);
3876                         if (!err)
3877                                 tg3_nvram_unlock(tp);
3878                 }
3879         }
3880
3881         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3882
3883         return 0;
3884 }
3885
3886 static void tg3_power_down(struct tg3 *tp)
3887 {
3888         tg3_power_down_prepare(tp);
3889
3890         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3891         pci_set_power_state(tp->pdev, PCI_D3hot);
3892 }
3893
3894 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3895 {
3896         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3897         case MII_TG3_AUX_STAT_10HALF:
3898                 *speed = SPEED_10;
3899                 *duplex = DUPLEX_HALF;
3900                 break;
3901
3902         case MII_TG3_AUX_STAT_10FULL:
3903                 *speed = SPEED_10;
3904                 *duplex = DUPLEX_FULL;
3905                 break;
3906
3907         case MII_TG3_AUX_STAT_100HALF:
3908                 *speed = SPEED_100;
3909                 *duplex = DUPLEX_HALF;
3910                 break;
3911
3912         case MII_TG3_AUX_STAT_100FULL:
3913                 *speed = SPEED_100;
3914                 *duplex = DUPLEX_FULL;
3915                 break;
3916
3917         case MII_TG3_AUX_STAT_1000HALF:
3918                 *speed = SPEED_1000;
3919                 *duplex = DUPLEX_HALF;
3920                 break;
3921
3922         case MII_TG3_AUX_STAT_1000FULL:
3923                 *speed = SPEED_1000;
3924                 *duplex = DUPLEX_FULL;
3925                 break;
3926
3927         default:
3928                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3929                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3930                                  SPEED_10;
3931                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3932                                   DUPLEX_HALF;
3933                         break;
3934                 }
3935                 *speed = SPEED_UNKNOWN;
3936                 *duplex = DUPLEX_UNKNOWN;
3937                 break;
3938         }
3939 }
3940
3941 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3942 {
3943         int err = 0;
3944         u32 val, new_adv;
3945
3946         new_adv = ADVERTISE_CSMA;
3947         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3948         new_adv |= mii_advertise_flowctrl(flowctrl);
3949
3950         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3951         if (err)
3952                 goto done;
3953
3954         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3955                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3956
3957                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3958                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3959                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3960
3961                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3962                 if (err)
3963                         goto done;
3964         }
3965
3966         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3967                 goto done;
3968
3969         tw32(TG3_CPMU_EEE_MODE,
3970              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3971
3972         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3973         if (!err) {
3974                 u32 err2;
3975
3976                 val = 0;
3977                 /* Advertise 100BASE-TX EEE capability */
3978                 if (advertise & ADVERTISED_100baseT_Full)
3979                         val |= MDIO_AN_EEE_ADV_100TX;
3980                 /* Advertise 1000BASE-T EEE capability */
3981                 if (advertise & ADVERTISED_1000baseT_Full)
3982                         val |= MDIO_AN_EEE_ADV_1000T;
3983                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3984                 if (err)
3985                         val = 0;
3986
3987                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3988                 case ASIC_REV_5717:
3989                 case ASIC_REV_57765:
3990                 case ASIC_REV_57766:
3991                 case ASIC_REV_5719:
3992                         /* If any EEE modes were advertised above... */
3993                         if (val)
3994                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3995                                       MII_TG3_DSP_TAP26_RMRXSTO |
3996                                       MII_TG3_DSP_TAP26_OPCSINPT;
3997                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3998                         /* Fall through */
3999                 case ASIC_REV_5720:
4000                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4001                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4002                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4003                 }
4004
4005                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
4006                 if (!err)
4007                         err = err2;
4008         }
4009
4010 done:
4011         return err;
4012 }
4013
4014 static void tg3_phy_copper_begin(struct tg3 *tp)
4015 {
4016         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4017             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4018                 u32 adv, fc;
4019
4020                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4021                         adv = ADVERTISED_10baseT_Half |
4022                               ADVERTISED_10baseT_Full;
4023                         if (tg3_flag(tp, WOL_SPEED_100MB))
4024                                 adv |= ADVERTISED_100baseT_Half |
4025                                        ADVERTISED_100baseT_Full;
4026
4027                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4028                 } else {
4029                         adv = tp->link_config.advertising;
4030                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4031                                 adv &= ~(ADVERTISED_1000baseT_Half |
4032                                          ADVERTISED_1000baseT_Full);
4033
4034                         fc = tp->link_config.flowctrl;
4035                 }
4036
4037                 tg3_phy_autoneg_cfg(tp, adv, fc);
4038
4039                 tg3_writephy(tp, MII_BMCR,
4040                              BMCR_ANENABLE | BMCR_ANRESTART);
4041         } else {
4042                 int i;
4043                 u32 bmcr, orig_bmcr;
4044
4045                 tp->link_config.active_speed = tp->link_config.speed;
4046                 tp->link_config.active_duplex = tp->link_config.duplex;
4047
4048                 bmcr = 0;
4049                 switch (tp->link_config.speed) {
4050                 default:
4051                 case SPEED_10:
4052                         break;
4053
4054                 case SPEED_100:
4055                         bmcr |= BMCR_SPEED100;
4056                         break;
4057
4058                 case SPEED_1000:
4059                         bmcr |= BMCR_SPEED1000;
4060                         break;
4061                 }
4062
4063                 if (tp->link_config.duplex == DUPLEX_FULL)
4064                         bmcr |= BMCR_FULLDPLX;
4065
4066                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4067                     (bmcr != orig_bmcr)) {
4068                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4069                         for (i = 0; i < 1500; i++) {
4070                                 u32 tmp;
4071
4072                                 udelay(10);
4073                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4074                                     tg3_readphy(tp, MII_BMSR, &tmp))
4075                                         continue;
4076                                 if (!(tmp & BMSR_LSTATUS)) {
4077                                         udelay(40);
4078                                         break;
4079                                 }
4080                         }
4081                         tg3_writephy(tp, MII_BMCR, bmcr);
4082                         udelay(40);
4083                 }
4084         }
4085 }
4086
4087 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4088 {
4089         int err;
4090
4091         /* Turn off tap power management and set the
4092          * extended packet length bit. */
4093         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4094
4095         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4096         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4097         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4098         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4099         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4100
4101         udelay(40);
4102
4103         return err;
4104 }
4105
4106 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4107 {
4108         u32 advmsk, tgtadv, advertising;
4109
4110         advertising = tp->link_config.advertising;
4111         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4112
4113         advmsk = ADVERTISE_ALL;
4114         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4115                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4116                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4117         }
4118
4119         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4120                 return false;
4121
4122         if ((*lcladv & advmsk) != tgtadv)
4123                 return false;
4124
4125         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4126                 u32 tg3_ctrl;
4127
4128                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4129
4130                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4131                         return false;
4132
4133                 if (tgtadv &&
4134                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4135                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4136                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4137                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4138                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4139                 } else {
4140                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4141                 }
4142
4143                 if (tg3_ctrl != tgtadv)
4144                         return false;
4145         }
4146
4147         return true;
4148 }
4149
4150 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4151 {
4152         u32 lpeth = 0;
4153
4154         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4155                 u32 val;
4156
4157                 if (tg3_readphy(tp, MII_STAT1000, &val))
4158                         return false;
4159
4160                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4161         }
4162
4163         if (tg3_readphy(tp, MII_LPA, rmtadv))
4164                 return false;
4165
4166         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4167         tp->link_config.rmt_adv = lpeth;
4168
4169         return true;
4170 }
4171
4172 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4173 {
4174         int current_link_up;
4175         u32 bmsr, val;
4176         u32 lcl_adv, rmt_adv;
4177         u16 current_speed;
4178         u8 current_duplex;
4179         int i, err;
4180
4181         tw32(MAC_EVENT, 0);
4182
4183         tw32_f(MAC_STATUS,
4184              (MAC_STATUS_SYNC_CHANGED |
4185               MAC_STATUS_CFG_CHANGED |
4186               MAC_STATUS_MI_COMPLETION |
4187               MAC_STATUS_LNKSTATE_CHANGED));
4188         udelay(40);
4189
4190         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4191                 tw32_f(MAC_MI_MODE,
4192                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4193                 udelay(80);
4194         }
4195
4196         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4197
4198         /* Some third-party PHYs need to be reset on link going
4199          * down.
4200          */
4201         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4202              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4203              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4204             netif_carrier_ok(tp->dev)) {
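                /* BMSR latches link-down events; read it twice so the
                 * second read returns the current link state.
                 */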
4205                 tg3_readphy(tp, MII_BMSR, &bmsr);
4206                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4207                     !(bmsr & BMSR_LSTATUS))
4208                         force_reset = 1;
4209         }
4210         if (force_reset)
4211                 tg3_phy_reset(tp);
4212
4213         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4214                 tg3_readphy(tp, MII_BMSR, &bmsr);
4215                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4216                     !tg3_flag(tp, INIT_COMPLETE))
4217                         bmsr = 0;
4218
4219                 if (!(bmsr & BMSR_LSTATUS)) {
4220                         err = tg3_init_5401phy_dsp(tp);
4221                         if (err)
4222                                 return err;
4223
4224                         tg3_readphy(tp, MII_BMSR, &bmsr);
4225                         for (i = 0; i < 1000; i++) {
4226                                 udelay(10);
4227                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4228                                     (bmsr & BMSR_LSTATUS)) {
4229                                         udelay(40);
4230                                         break;
4231                                 }
4232                         }
4233
4234                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4235                             TG3_PHY_REV_BCM5401_B0 &&
4236                             !(bmsr & BMSR_LSTATUS) &&
4237                             tp->link_config.active_speed == SPEED_1000) {
4238                                 err = tg3_phy_reset(tp);
4239                                 if (!err)
4240                                         err = tg3_init_5401phy_dsp(tp);
4241                                 if (err)
4242                                         return err;
4243                         }
4244                 }
4245         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4246                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4247                 /* 5701 {A0,B0} CRC bug workaround */
4248                 tg3_writephy(tp, 0x15, 0x0a75);
4249                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4250                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4251                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4252         }
4253
4254         /* Clear pending interrupts... */
4255         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4256         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4257
4258         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4259                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4260         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4261                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4262
4263         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4264             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4265                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4266                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4267                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4268                 else
4269                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4270         }
4271
4272         current_link_up = 0;
4273         current_speed = SPEED_UNKNOWN;
4274         current_duplex = DUPLEX_UNKNOWN;
4275         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4276         tp->link_config.rmt_adv = 0;
4277
4278         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4279                 err = tg3_phy_auxctl_read(tp,
4280                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4281                                           &val);
4282                 if (!err && !(val & (1 << 10))) {
4283                         tg3_phy_auxctl_write(tp,
4284                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4285                                              val | (1 << 10));
4286                         goto relink;
4287                 }
4288         }
4289
4290         bmsr = 0;
4291         for (i = 0; i < 100; i++) {
4292                 tg3_readphy(tp, MII_BMSR, &bmsr);
4293                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4294                     (bmsr & BMSR_LSTATUS))
4295                         break;
4296                 udelay(40);
4297         }
4298
4299         if (bmsr & BMSR_LSTATUS) {
4300                 u32 aux_stat, bmcr;
4301
4302                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4303                 for (i = 0; i < 2000; i++) {
4304                         udelay(10);
4305                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4306                             aux_stat)
4307                                 break;
4308                 }
4309
4310                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4311                                              &current_speed,
4312                                              &current_duplex);
4313
4314                 bmcr = 0;
4315                 for (i = 0; i < 200; i++) {
4316                         tg3_readphy(tp, MII_BMCR, &bmcr);
4317                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4318                                 continue;
4319                         if (bmcr && bmcr != 0x7fff)
4320                                 break;
4321                         udelay(10);
4322                 }
4323
4324                 lcl_adv = 0;
4325                 rmt_adv = 0;
4326
4327                 tp->link_config.active_speed = current_speed;
4328                 tp->link_config.active_duplex = current_duplex;
4329
4330                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4331                         if ((bmcr & BMCR_ANENABLE) &&
4332                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4333                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4334                                 current_link_up = 1;
4335                 } else {
4336                         if (!(bmcr & BMCR_ANENABLE) &&
4337                             tp->link_config.speed == current_speed &&
4338                             tp->link_config.duplex == current_duplex &&
4339                             tp->link_config.flowctrl ==
4340                             tp->link_config.active_flowctrl) {
4341                                 current_link_up = 1;
4342                         }
4343                 }
4344
4345                 if (current_link_up == 1 &&
4346                     tp->link_config.active_duplex == DUPLEX_FULL) {
4347                         u32 reg, bit;
4348
4349                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4350                                 reg = MII_TG3_FET_GEN_STAT;
4351                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4352                         } else {
4353                                 reg = MII_TG3_EXT_STAT;
4354                                 bit = MII_TG3_EXT_STAT_MDIX;
4355                         }
4356
4357                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4358                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4359
4360                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4361                 }
4362         }
4363
4364 relink:
4365         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4366                 tg3_phy_copper_begin(tp);
4367
4368                 tg3_readphy(tp, MII_BMSR, &bmsr);
4369                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4370                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4371                         current_link_up = 1;
4372         }
4373
4374         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4375         if (current_link_up == 1) {
4376                 if (tp->link_config.active_speed == SPEED_100 ||
4377                     tp->link_config.active_speed == SPEED_10)
4378                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4379                 else
4380                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4381         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4382                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4383         else
4384                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4385
4386         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4387         if (tp->link_config.active_duplex == DUPLEX_HALF)
4388                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4389
4390         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4391                 if (current_link_up == 1 &&
4392                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4393                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4394                 else
4395                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4396         }
4397
4398         /* It is not clear why, but without auto-polling enabled here,
4399          * the Netgear GA302T PHY does not send or receive packets.
4400          */
4401         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4402             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4403                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4404                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4405                 udelay(80);
4406         }
4407
4408         tw32_f(MAC_MODE, tp->mac_mode);
4409         udelay(40);
4410
4411         tg3_phy_eee_adjust(tp, current_link_up);
4412
4413         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4414                 /* Polled via timer. */
4415                 tw32_f(MAC_EVENT, 0);
4416         } else {
4417                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4418         }
4419         udelay(40);
4420
4421         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4422             current_link_up == 1 &&
4423             tp->link_config.active_speed == SPEED_1000 &&
4424             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4425                 udelay(120);
4426                 tw32_f(MAC_STATUS,
4427                      (MAC_STATUS_SYNC_CHANGED |
4428                       MAC_STATUS_CFG_CHANGED));
4429                 udelay(40);
4430                 tg3_write_mem(tp,
4431                               NIC_SRAM_FIRMWARE_MBOX,
4432                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4433         }
4434
4435         /* Prevent send BD corruption. */
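        /* On chips with the CLKREQ bug, leaving CLKREQ enabled while
         * running at 10 or 100 Mbps evidently corrupts send BDs, so
         * disable it at those speeds and re-enable it at gigabit.
         */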
4436         if (tg3_flag(tp, CLKREQ_BUG)) {
4437                 u16 oldlnkctl, newlnkctl;
4438
4439                 pci_read_config_word(tp->pdev,
4440                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4441                                      &oldlnkctl);
4442                 if (tp->link_config.active_speed == SPEED_100 ||
4443                     tp->link_config.active_speed == SPEED_10)
4444                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4445                 else
4446                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4447                 if (newlnkctl != oldlnkctl)
4448                         pci_write_config_word(tp->pdev,
4449                                               pci_pcie_cap(tp->pdev) +
4450                                               PCI_EXP_LNKCTL, newlnkctl);
4451         }
4452
4453         if (current_link_up != netif_carrier_ok(tp->dev)) {
4454                 if (current_link_up)
4455                         netif_carrier_on(tp->dev);
4456                 else
4457                         netif_carrier_off(tp->dev);
4458                 tg3_link_report(tp);
4459         }
4460
4461         return 0;
4462 }
4463
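/* Software state machine for 1000BASE-X auto-negotiation, evidently
 * modeled on the IEEE 802.3 clause 37 arbitration state diagram; it is
 * used when the MAC's hardware autoneg support is not in play.  The
 * MR_* flags mirror the MR_* management variables from the spec.
 */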
4464 struct tg3_fiber_aneginfo {
4465         int state;
4466 #define ANEG_STATE_UNKNOWN              0
4467 #define ANEG_STATE_AN_ENABLE            1
4468 #define ANEG_STATE_RESTART_INIT         2
4469 #define ANEG_STATE_RESTART              3
4470 #define ANEG_STATE_DISABLE_LINK_OK      4
4471 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4472 #define ANEG_STATE_ABILITY_DETECT       6
4473 #define ANEG_STATE_ACK_DETECT_INIT      7
4474 #define ANEG_STATE_ACK_DETECT           8
4475 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4476 #define ANEG_STATE_COMPLETE_ACK         10
4477 #define ANEG_STATE_IDLE_DETECT_INIT     11
4478 #define ANEG_STATE_IDLE_DETECT          12
4479 #define ANEG_STATE_LINK_OK              13
4480 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4481 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4482
4483         u32 flags;
4484 #define MR_AN_ENABLE            0x00000001
4485 #define MR_RESTART_AN           0x00000002
4486 #define MR_AN_COMPLETE          0x00000004
4487 #define MR_PAGE_RX              0x00000008
4488 #define MR_NP_LOADED            0x00000010
4489 #define MR_TOGGLE_TX            0x00000020
4490 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4491 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4492 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4493 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4494 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4495 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4496 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4497 #define MR_TOGGLE_RX            0x00002000
4498 #define MR_NP_RX                0x00004000
4499
4500 #define MR_LINK_OK              0x80000000
4501
4502         unsigned long link_time, cur_time;
4503
4504         u32 ability_match_cfg;
4505         int ability_match_count;
4506
4507         char ability_match, idle_match, ack_match;
4508
4509         u32 txconfig, rxconfig;
4510 #define ANEG_CFG_NP             0x00000080
4511 #define ANEG_CFG_ACK            0x00000040
4512 #define ANEG_CFG_RF2            0x00000020
4513 #define ANEG_CFG_RF1            0x00000010
4514 #define ANEG_CFG_PS2            0x00000001
4515 #define ANEG_CFG_PS1            0x00008000
4516 #define ANEG_CFG_HD             0x00004000
4517 #define ANEG_CFG_FD             0x00002000
4518 #define ANEG_CFG_INVAL          0x00001f06
4519
4520 };
4521 #define ANEG_OK         0
4522 #define ANEG_DONE       1
4523 #define ANEG_TIMER_ENAB 2
4524 #define ANEG_FAILED     -1
4525
4526 #define ANEG_STATE_SETTLE_TIME  10000
4527
4528 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4529                                    struct tg3_fiber_aneginfo *ap)
4530 {
4531         u16 flowctrl;
4532         unsigned long delta;
4533         u32 rx_cfg_reg;
4534         int ret;
4535
4536         if (ap->state == ANEG_STATE_UNKNOWN) {
4537                 ap->rxconfig = 0;
4538                 ap->link_time = 0;
4539                 ap->cur_time = 0;
4540                 ap->ability_match_cfg = 0;
4541                 ap->ability_match_count = 0;
4542                 ap->ability_match = 0;
4543                 ap->idle_match = 0;
4544                 ap->ack_match = 0;
4545         }
4546         ap->cur_time++;
4547
4548         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4549                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4550
4551                 if (rx_cfg_reg != ap->ability_match_cfg) {
4552                         ap->ability_match_cfg = rx_cfg_reg;
4553                         ap->ability_match = 0;
4554                         ap->ability_match_count = 0;
4555                 } else {
4556                         if (++ap->ability_match_count > 1) {
4557                                 ap->ability_match = 1;
4558                                 ap->ability_match_cfg = rx_cfg_reg;
4559                         }
4560                 }
4561                 if (rx_cfg_reg & ANEG_CFG_ACK)
4562                         ap->ack_match = 1;
4563                 else
4564                         ap->ack_match = 0;
4565
4566                 ap->idle_match = 0;
4567         } else {
4568                 ap->idle_match = 1;
4569                 ap->ability_match_cfg = 0;
4570                 ap->ability_match_count = 0;
4571                 ap->ability_match = 0;
4572                 ap->ack_match = 0;
4573
4574                 rx_cfg_reg = 0;
4575         }
4576
4577         ap->rxconfig = rx_cfg_reg;
4578         ret = ANEG_OK;
4579
4580         switch (ap->state) {
4581         case ANEG_STATE_UNKNOWN:
4582                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4583                         ap->state = ANEG_STATE_AN_ENABLE;
4584
4585                 /* fallthru */
4586         case ANEG_STATE_AN_ENABLE:
4587                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4588                 if (ap->flags & MR_AN_ENABLE) {
4589                         ap->link_time = 0;
4590                         ap->cur_time = 0;
4591                         ap->ability_match_cfg = 0;
4592                         ap->ability_match_count = 0;
4593                         ap->ability_match = 0;
4594                         ap->idle_match = 0;
4595                         ap->ack_match = 0;
4596
4597                         ap->state = ANEG_STATE_RESTART_INIT;
4598                 } else {
4599                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4600                 }
4601                 break;
4602
4603         case ANEG_STATE_RESTART_INIT:
4604                 ap->link_time = ap->cur_time;
4605                 ap->flags &= ~(MR_NP_LOADED);
4606                 ap->txconfig = 0;
4607                 tw32(MAC_TX_AUTO_NEG, 0);
4608                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4609                 tw32_f(MAC_MODE, tp->mac_mode);
4610                 udelay(40);
4611
4612                 ret = ANEG_TIMER_ENAB;
4613                 ap->state = ANEG_STATE_RESTART;
4614
4615                 /* fallthru */
4616         case ANEG_STATE_RESTART:
4617                 delta = ap->cur_time - ap->link_time;
4618                 if (delta > ANEG_STATE_SETTLE_TIME)
4619                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4620                 else
4621                         ret = ANEG_TIMER_ENAB;
4622                 break;
4623
4624         case ANEG_STATE_DISABLE_LINK_OK:
4625                 ret = ANEG_DONE;
4626                 break;
4627
4628         case ANEG_STATE_ABILITY_DETECT_INIT:
4629                 ap->flags &= ~(MR_TOGGLE_TX);
4630                 ap->txconfig = ANEG_CFG_FD;
4631                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4632                 if (flowctrl & ADVERTISE_1000XPAUSE)
4633                         ap->txconfig |= ANEG_CFG_PS1;
4634                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4635                         ap->txconfig |= ANEG_CFG_PS2;
4636                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4637                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4638                 tw32_f(MAC_MODE, tp->mac_mode);
4639                 udelay(40);
4640
4641                 ap->state = ANEG_STATE_ABILITY_DETECT;
4642                 break;
4643
4644         case ANEG_STATE_ABILITY_DETECT:
4645                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4646                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4647                 break;
4648
4649         case ANEG_STATE_ACK_DETECT_INIT:
4650                 ap->txconfig |= ANEG_CFG_ACK;
4651                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4652                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4653                 tw32_f(MAC_MODE, tp->mac_mode);
4654                 udelay(40);
4655
4656                 ap->state = ANEG_STATE_ACK_DETECT;
4657
4658                 /* fallthru */
4659         case ANEG_STATE_ACK_DETECT:
4660                 if (ap->ack_match != 0) {
4661                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4662                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4663                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4664                         } else {
4665                                 ap->state = ANEG_STATE_AN_ENABLE;
4666                         }
4667                 } else if (ap->ability_match != 0 &&
4668                            ap->rxconfig == 0) {
4669                         ap->state = ANEG_STATE_AN_ENABLE;
4670                 }
4671                 break;
4672
4673         case ANEG_STATE_COMPLETE_ACK_INIT:
4674                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4675                         ret = ANEG_FAILED;
4676                         break;
4677                 }
4678                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4679                                MR_LP_ADV_HALF_DUPLEX |
4680                                MR_LP_ADV_SYM_PAUSE |
4681                                MR_LP_ADV_ASYM_PAUSE |
4682                                MR_LP_ADV_REMOTE_FAULT1 |
4683                                MR_LP_ADV_REMOTE_FAULT2 |
4684                                MR_LP_ADV_NEXT_PAGE |
4685                                MR_TOGGLE_RX |
4686                                MR_NP_RX);
4687                 if (ap->rxconfig & ANEG_CFG_FD)
4688                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4689                 if (ap->rxconfig & ANEG_CFG_HD)
4690                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4691                 if (ap->rxconfig & ANEG_CFG_PS1)
4692                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4693                 if (ap->rxconfig & ANEG_CFG_PS2)
4694                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4695                 if (ap->rxconfig & ANEG_CFG_RF1)
4696                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4697                 if (ap->rxconfig & ANEG_CFG_RF2)
4698                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4699                 if (ap->rxconfig & ANEG_CFG_NP)
4700                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4701
4702                 ap->link_time = ap->cur_time;
4703
4704                 ap->flags ^= (MR_TOGGLE_TX);
4705                 if (ap->rxconfig & 0x0008)
4706                         ap->flags |= MR_TOGGLE_RX;
4707                 if (ap->rxconfig & ANEG_CFG_NP)
4708                         ap->flags |= MR_NP_RX;
4709                 ap->flags |= MR_PAGE_RX;
4710
4711                 ap->state = ANEG_STATE_COMPLETE_ACK;
4712                 ret = ANEG_TIMER_ENAB;
4713                 break;
4714
4715         case ANEG_STATE_COMPLETE_ACK:
4716                 if (ap->ability_match != 0 &&
4717                     ap->rxconfig == 0) {
4718                         ap->state = ANEG_STATE_AN_ENABLE;
4719                         break;
4720                 }
4721                 delta = ap->cur_time - ap->link_time;
4722                 if (delta > ANEG_STATE_SETTLE_TIME) {
4723                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4724                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4725                         } else {
4726                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4727                                     !(ap->flags & MR_NP_RX)) {
4728                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4729                                 } else {
4730                                         ret = ANEG_FAILED;
4731                                 }
4732                         }
4733                 }
4734                 break;
4735
4736         case ANEG_STATE_IDLE_DETECT_INIT:
4737                 ap->link_time = ap->cur_time;
4738                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4739                 tw32_f(MAC_MODE, tp->mac_mode);
4740                 udelay(40);
4741
4742                 ap->state = ANEG_STATE_IDLE_DETECT;
4743                 ret = ANEG_TIMER_ENAB;
4744                 break;
4745
4746         case ANEG_STATE_IDLE_DETECT:
4747                 if (ap->ability_match != 0 &&
4748                     ap->rxconfig == 0) {
4749                         ap->state = ANEG_STATE_AN_ENABLE;
4750                         break;
4751                 }
4752                 delta = ap->cur_time - ap->link_time;
4753                 if (delta > ANEG_STATE_SETTLE_TIME) {
4754                         /* XXX another gem from the Broadcom driver :( */
4755                         ap->state = ANEG_STATE_LINK_OK;
4756                 }
4757                 break;
4758
4759         case ANEG_STATE_LINK_OK:
4760                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4761                 ret = ANEG_DONE;
4762                 break;
4763
4764         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4765                 /* ??? unimplemented */
4766                 break;
4767
4768         case ANEG_STATE_NEXT_PAGE_WAIT:
4769                 /* ??? unimplemented */
4770                 break;
4771
4772         default:
4773                 ret = ANEG_FAILED;
4774                 break;
4775         }
4776
4777         return ret;
4778 }
4779
4780 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4781 {
4782         int res = 0;
4783         struct tg3_fiber_aneginfo aninfo;
4784         int status = ANEG_FAILED;
4785         unsigned int tick;
4786         u32 tmp;
4787
4788         tw32_f(MAC_TX_AUTO_NEG, 0);
4789
4790         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4791         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4792         udelay(40);
4793
4794         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4795         udelay(40);
4796
4797         memset(&aninfo, 0, sizeof(aninfo));
4798         aninfo.flags |= MR_AN_ENABLE;
4799         aninfo.state = ANEG_STATE_UNKNOWN;
4800         aninfo.cur_time = 0;
4801         tick = 0;
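        /* Single-step the state machine roughly once per microsecond,
         * allowing up to ~195 ms for negotiation to complete or fail.
         */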
4802         while (++tick < 195000) {
4803                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4804                 if (status == ANEG_DONE || status == ANEG_FAILED)
4805                         break;
4806
4807                 udelay(1);
4808         }
4809
4810         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4811         tw32_f(MAC_MODE, tp->mac_mode);
4812         udelay(40);
4813
4814         *txflags = aninfo.txconfig;
4815         *rxflags = aninfo.flags;
4816
4817         if (status == ANEG_DONE &&
4818             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4819                              MR_LP_ADV_FULL_DUPLEX)))
4820                 res = 1;
4821
4822         return res;
4823 }
4824
4825 static void tg3_init_bcm8002(struct tg3 *tp)
4826 {
4827         u32 mac_status = tr32(MAC_STATUS);
4828         int i;
4829
4830         /* Reset when initializing for the first time or when we have link. */
4831         if (tg3_flag(tp, INIT_COMPLETE) &&
4832             !(mac_status & MAC_STATUS_PCS_SYNCED))
4833                 return;
4834
4835         /* Set PLL lock range. */
4836         tg3_writephy(tp, 0x16, 0x8007);
4837
4838         /* SW reset */
4839         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4840
4841         /* Wait for reset to complete. */
4842         /* XXX schedule_timeout() ... */
4843         for (i = 0; i < 500; i++)
4844                 udelay(10);
4845
4846         /* Config mode; select PMA/Ch 1 regs. */
4847         tg3_writephy(tp, 0x10, 0x8411);
4848
4849         /* Enable auto-lock and comdet, select txclk for tx. */
4850         tg3_writephy(tp, 0x11, 0x0a10);
4851
4852         tg3_writephy(tp, 0x18, 0x00a0);
4853         tg3_writephy(tp, 0x16, 0x41ff);
4854
4855         /* Assert and deassert POR. */
4856         tg3_writephy(tp, 0x13, 0x0400);
4857         udelay(40);
4858         tg3_writephy(tp, 0x13, 0x0000);
4859
4860         tg3_writephy(tp, 0x11, 0x0a50);
4861         udelay(40);
4862         tg3_writephy(tp, 0x11, 0x0a10);
4863
4864         /* Wait for signal to stabilize */
4865         /* XXX schedule_timeout() ... */
4866         for (i = 0; i < 15000; i++)
4867                 udelay(10);
4868
4869         /* Deselect the channel register so we can read the PHYID
4870          * later.
4871          */
4872         tg3_writephy(tp, 0x10, 0x8011);
4873 }
4874
4875 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4876 {
4877         u16 flowctrl;
4878         u32 sg_dig_ctrl, sg_dig_status;
4879         u32 serdes_cfg, expected_sg_dig_ctrl;
4880         int workaround, port_a;
4881         int current_link_up;
4882
4883         serdes_cfg = 0;
4884         expected_sg_dig_ctrl = 0;
4885         workaround = 0;
4886         port_a = 1;
4887         current_link_up = 0;
4888
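        /* Every revision except 5704 A0/A1 needs the serdes config
         * workaround; the DUAL_MAC_CTRL_ID bit tells us whether this
         * MAC is port A or port B of the device.
         */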
4889         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4890             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4891                 workaround = 1;
4892                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4893                         port_a = 0;
4894
4895                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4896                 /* preserve bits 20-23 for voltage regulator */
4897                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4898         }
4899
4900         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4901
4902         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4903                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4904                         if (workaround) {
4905                                 u32 val = serdes_cfg;
4906
4907                                 if (port_a)
4908                                         val |= 0xc010000;
4909                                 else
4910                                         val |= 0x4010000;
4911                                 tw32_f(MAC_SERDES_CFG, val);
4912                         }
4913
4914                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4915                 }
4916                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4917                         tg3_setup_flow_control(tp, 0, 0);
4918                         current_link_up = 1;
4919                 }
4920                 goto out;
4921         }
4922
4923         /* Want auto-negotiation.  */
4924         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4925
4926         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4927         if (flowctrl & ADVERTISE_1000XPAUSE)
4928                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4929         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4930                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4931
4932         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4933                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4934                     tp->serdes_counter &&
4935                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4936                                     MAC_STATUS_RCVD_CFG)) ==
4937                      MAC_STATUS_PCS_SYNCED)) {
4938                         tp->serdes_counter--;
4939                         current_link_up = 1;
4940                         goto out;
4941                 }
4942 restart_autoneg:
4943                 if (workaround)
4944                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4945                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4946                 udelay(5);
4947                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4948
4949                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4950                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4951         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4952                                  MAC_STATUS_SIGNAL_DET)) {
4953                 sg_dig_status = tr32(SG_DIG_STATUS);
4954                 mac_status = tr32(MAC_STATUS);
4955
4956                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4957                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4958                         u32 local_adv = 0, remote_adv = 0;
4959
4960                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4961                                 local_adv |= ADVERTISE_1000XPAUSE;
4962                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4963                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4964
4965                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4966                                 remote_adv |= LPA_1000XPAUSE;
4967                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4968                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4969
4970                         tp->link_config.rmt_adv =
4971                                            mii_adv_to_ethtool_adv_x(remote_adv);
4972
4973                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4974                         current_link_up = 1;
4975                         tp->serdes_counter = 0;
4976                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4977                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4978                         if (tp->serdes_counter)
4979                                 tp->serdes_counter--;
4980                         else {
4981                                 if (workaround) {
4982                                         u32 val = serdes_cfg;
4983
4984                                         if (port_a)
4985                                                 val |= 0xc010000;
4986                                         else
4987                                                 val |= 0x4010000;
4988
4989                                         tw32_f(MAC_SERDES_CFG, val);
4990                                 }
4991
4992                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4993                                 udelay(40);
4994
4995                                 /* Link parallel detection: link is up
4996                                  * only if we have PCS_SYNC and are not
4997                                  * receiving config code words. */
4998                                 mac_status = tr32(MAC_STATUS);
4999                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5000                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5001                                         tg3_setup_flow_control(tp, 0, 0);
5002                                         current_link_up = 1;
5003                                         tp->phy_flags |=
5004                                                 TG3_PHYFLG_PARALLEL_DETECT;
5005                                         tp->serdes_counter =
5006                                                 SERDES_PARALLEL_DET_TIMEOUT;
5007                                 } else
5008                                         goto restart_autoneg;
5009                         }
5010                 }
5011         } else {
5012                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5013                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5014         }
5015
5016 out:
5017         return current_link_up;
5018 }
5019
5020 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5021 {
5022         int current_link_up = 0;
5023
5024         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5025                 goto out;
5026
5027         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5028                 u32 txflags, rxflags;
5029                 int i;
5030
5031                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5032                         u32 local_adv = 0, remote_adv = 0;
5033
5034                         if (txflags & ANEG_CFG_PS1)
5035                                 local_adv |= ADVERTISE_1000XPAUSE;
5036                         if (txflags & ANEG_CFG_PS2)
5037                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5038
5039                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5040                                 remote_adv |= LPA_1000XPAUSE;
5041                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5042                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5043
5044                         tp->link_config.rmt_adv =
5045                                            mii_adv_to_ethtool_adv_x(remote_adv);
5046
5047                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5048
5049                         current_link_up = 1;
5050                 }
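                /* Ack the latched sync/config-change bits, polling for
                 * up to ~1.8 ms until the MAC stops reasserting them.
                 */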
5051                 for (i = 0; i < 30; i++) {
5052                         udelay(20);
5053                         tw32_f(MAC_STATUS,
5054                                (MAC_STATUS_SYNC_CHANGED |
5055                                 MAC_STATUS_CFG_CHANGED));
5056                         udelay(40);
5057                         if ((tr32(MAC_STATUS) &
5058                              (MAC_STATUS_SYNC_CHANGED |
5059                               MAC_STATUS_CFG_CHANGED)) == 0)
5060                                 break;
5061                 }
5062
5063                 mac_status = tr32(MAC_STATUS);
5064                 if (current_link_up == 0 &&
5065                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5066                     !(mac_status & MAC_STATUS_RCVD_CFG))
5067                         current_link_up = 1;
5068         } else {
5069                 tg3_setup_flow_control(tp, 0, 0);
5070
5071                 /* Forcing 1000FD link up. */
5072                 current_link_up = 1;
5073
5074                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5075                 udelay(40);
5076
5077                 tw32_f(MAC_MODE, tp->mac_mode);
5078                 udelay(40);
5079         }
5080
5081 out:
5082         return current_link_up;
5083 }
5084
5085 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5086 {
5087         u32 orig_pause_cfg;
5088         u16 orig_active_speed;
5089         u8 orig_active_duplex;
5090         u32 mac_status;
5091         int current_link_up;
5092         int i;
5093
5094         orig_pause_cfg = tp->link_config.active_flowctrl;
5095         orig_active_speed = tp->link_config.active_speed;
5096         orig_active_duplex = tp->link_config.active_duplex;
5097
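        /* Fast path: without hardware autoneg, if the link already looks
         * good (PCS synced, signal detected, no pending changes and no
         * config reception), just ack the change bits and return.
         */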
5098         if (!tg3_flag(tp, HW_AUTONEG) &&
5099             netif_carrier_ok(tp->dev) &&
5100             tg3_flag(tp, INIT_COMPLETE)) {
5101                 mac_status = tr32(MAC_STATUS);
5102                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5103                                MAC_STATUS_SIGNAL_DET |
5104                                MAC_STATUS_CFG_CHANGED |
5105                                MAC_STATUS_RCVD_CFG);
5106                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5107                                    MAC_STATUS_SIGNAL_DET)) {
5108                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5109                                             MAC_STATUS_CFG_CHANGED));
5110                         return 0;
5111                 }
5112         }
5113
5114         tw32_f(MAC_TX_AUTO_NEG, 0);
5115
5116         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5117         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5118         tw32_f(MAC_MODE, tp->mac_mode);
5119         udelay(40);
5120
5121         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5122                 tg3_init_bcm8002(tp);
5123
5124         /* Enable link change events even when polling the serdes. */
5125         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5126         udelay(40);
5127
5128         current_link_up = 0;
5129         tp->link_config.rmt_adv = 0;
5130         mac_status = tr32(MAC_STATUS);
5131
5132         if (tg3_flag(tp, HW_AUTONEG))
5133                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5134         else
5135                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5136
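        /* Mark the status block updated and clear its link-change bit
         * so the stale link event is not processed again.
         */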
5137         tp->napi[0].hw_status->status =
5138                 (SD_STATUS_UPDATED |
5139                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5140
5141         for (i = 0; i < 100; i++) {
5142                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5143                                     MAC_STATUS_CFG_CHANGED));
5144                 udelay(5);
5145                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5146                                          MAC_STATUS_CFG_CHANGED |
5147                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5148                         break;
5149         }
5150
5151         mac_status = tr32(MAC_STATUS);
5152         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5153                 current_link_up = 0;
5154                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5155                     tp->serdes_counter == 0) {
5156                         tw32_f(MAC_MODE, (tp->mac_mode |
5157                                           MAC_MODE_SEND_CONFIGS));
5158                         udelay(1);
5159                         tw32_f(MAC_MODE, tp->mac_mode);
5160                 }
5161         }
5162
5163         if (current_link_up == 1) {
5164                 tp->link_config.active_speed = SPEED_1000;
5165                 tp->link_config.active_duplex = DUPLEX_FULL;
5166                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5167                                     LED_CTRL_LNKLED_OVERRIDE |
5168                                     LED_CTRL_1000MBPS_ON));
5169         } else {
5170                 tp->link_config.active_speed = SPEED_UNKNOWN;
5171                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5172                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5173                                     LED_CTRL_LNKLED_OVERRIDE |
5174                                     LED_CTRL_TRAFFIC_OVERRIDE));
5175         }
5176
5177         if (current_link_up != netif_carrier_ok(tp->dev)) {
5178                 if (current_link_up)
5179                         netif_carrier_on(tp->dev);
5180                 else
5181                         netif_carrier_off(tp->dev);
5182                 tg3_link_report(tp);
5183         } else {
5184                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5185                 if (orig_pause_cfg != now_pause_cfg ||
5186                     orig_active_speed != tp->link_config.active_speed ||
5187                     orig_active_duplex != tp->link_config.active_duplex)
5188                         tg3_link_report(tp);
5189         }
5190
5191         return 0;
5192 }
5193
5194 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5195 {
5196         int current_link_up, err = 0;
5197         u32 bmsr, bmcr;
5198         u16 current_speed;
5199         u8 current_duplex;
5200         u32 local_adv, remote_adv;
5201
5202         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5203         tw32_f(MAC_MODE, tp->mac_mode);
5204         udelay(40);
5205
5206         tw32(MAC_EVENT, 0);
5207
5208         tw32_f(MAC_STATUS,
5209              (MAC_STATUS_SYNC_CHANGED |
5210               MAC_STATUS_CFG_CHANGED |
5211               MAC_STATUS_MI_COMPLETION |
5212               MAC_STATUS_LNKSTATE_CHANGED));
5213         udelay(40);
5214
5215         if (force_reset)
5216                 tg3_phy_reset(tp);
5217
5218         current_link_up = 0;
5219         current_speed = SPEED_UNKNOWN;
5220         current_duplex = DUPLEX_UNKNOWN;
5221         tp->link_config.rmt_adv = 0;
5222
5223         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5224         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
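        /* On 5714, override the BMSR link bit with the MAC's TX status,
         * evidently the more reliable link indication on this part.
         */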
5225         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5226                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5227                         bmsr |= BMSR_LSTATUS;
5228                 else
5229                         bmsr &= ~BMSR_LSTATUS;
5230         }
5231
5232         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5233
5234         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5235             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5236                 /* do nothing, just check for link up at the end */
5237         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5238                 u32 adv, newadv;
5239
5240                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5241                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5242                                  ADVERTISE_1000XPAUSE |
5243                                  ADVERTISE_1000XPSE_ASYM |
5244                                  ADVERTISE_SLCT);
5245
5246                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5247                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5248
5249                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5250                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5251                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5252                         tg3_writephy(tp, MII_BMCR, bmcr);
5253
5254                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5255                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5256                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5257
5258                         return err;
5259                 }
5260         } else {
5261                 u32 new_bmcr;
5262
5263                 bmcr &= ~BMCR_SPEED1000;
5264                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5265
5266                 if (tp->link_config.duplex == DUPLEX_FULL)
5267                         new_bmcr |= BMCR_FULLDPLX;
5268
5269                 if (new_bmcr != bmcr) {
5270                         /* BMCR_SPEED1000 is a reserved bit that needs
5271                          * to be set on write.
5272                          */
5273                         new_bmcr |= BMCR_SPEED1000;
5274
5275                         /* Force a link down */
5276                         if (netif_carrier_ok(tp->dev)) {
5277                                 u32 adv;
5278
5279                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5280                                 adv &= ~(ADVERTISE_1000XFULL |
5281                                          ADVERTISE_1000XHALF |
5282                                          ADVERTISE_SLCT);
5283                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5284                                 tg3_writephy(tp, MII_BMCR, bmcr |
5285                                                            BMCR_ANRESTART |
5286                                                            BMCR_ANENABLE);
5287                                 udelay(10);
5288                                 netif_carrier_off(tp->dev);
5289                         }
5290                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5291                         bmcr = new_bmcr;
5292                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5293                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5294                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5295                             ASIC_REV_5714) {
5296                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5297                                         bmsr |= BMSR_LSTATUS;
5298                                 else
5299                                         bmsr &= ~BMSR_LSTATUS;
5300                         }
5301                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5302                 }
5303         }
5304
5305         if (bmsr & BMSR_LSTATUS) {
5306                 current_speed = SPEED_1000;
5307                 current_link_up = 1;
5308                 if (bmcr & BMCR_FULLDPLX)
5309                         current_duplex = DUPLEX_FULL;
5310                 else
5311                         current_duplex = DUPLEX_HALF;
5312
5313                 local_adv = 0;
5314                 remote_adv = 0;
5315
5316                 if (bmcr & BMCR_ANENABLE) {
5317                         u32 common;
5318
5319                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5320                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5321                         common = local_adv & remote_adv;
5322                         if (common & (ADVERTISE_1000XHALF |
5323                                       ADVERTISE_1000XFULL)) {
5324                                 if (common & ADVERTISE_1000XFULL)
5325                                         current_duplex = DUPLEX_FULL;
5326                                 else
5327                                         current_duplex = DUPLEX_HALF;
5328
5329                                 tp->link_config.rmt_adv =
5330                                            mii_adv_to_ethtool_adv_x(remote_adv);
5331                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5332                                 /* Link is up via parallel detect */
5333                         } else {
5334                                 current_link_up = 0;
5335                         }
5336                 }
5337         }
5338
5339         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5340                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5341
5342         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5343         if (tp->link_config.active_duplex == DUPLEX_HALF)
5344                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5345
5346         tw32_f(MAC_MODE, tp->mac_mode);
5347         udelay(40);
5348
5349         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5350
5351         tp->link_config.active_speed = current_speed;
5352         tp->link_config.active_duplex = current_duplex;
5353
5354         if (current_link_up != netif_carrier_ok(tp->dev)) {
5355                 if (current_link_up)
5356                         netif_carrier_on(tp->dev);
5357                 else {
5358                         netif_carrier_off(tp->dev);
5359                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5360                 }
5361                 tg3_link_report(tp);
5362         }
5363         return err;
5364 }
5365
5366 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5367 {
5368         if (tp->serdes_counter) {
5369                 /* Give autoneg time to complete. */
5370                 tp->serdes_counter--;
5371                 return;
5372         }
5373
5374         if (!netif_carrier_ok(tp->dev) &&
5375             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5376                 u32 bmcr;
5377
5378                 tg3_readphy(tp, MII_BMCR, &bmcr);
5379                 if (bmcr & BMCR_ANENABLE) {
5380                         u32 phy1, phy2;
5381
5382                         /* Select shadow register 0x1f */
5383                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5384                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5385
5386                         /* Select expansion interrupt status register */
5387                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5388                                          MII_TG3_DSP_EXP1_INT_STAT);
5389                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5390                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5391
5392                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5393                                 /* We have signal detect and not receiving
5394                                  * config code words, link is up by parallel
5395                                  * detection.
5396                                  */
5397
5398                                 bmcr &= ~BMCR_ANENABLE;
5399                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5400                                 tg3_writephy(tp, MII_BMCR, bmcr);
5401                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5402                         }
5403                 }
5404         } else if (netif_carrier_ok(tp->dev) &&
5405                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5406                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5407                 u32 phy2;
5408
5409                 /* Select expansion interrupt status register */
5410                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5411                                  MII_TG3_DSP_EXP1_INT_STAT);
5412                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5413                 if (phy2 & 0x20) {
5414                         u32 bmcr;
5415
5416                         /* Config code words received, turn on autoneg. */
5417                         tg3_readphy(tp, MII_BMCR, &bmcr);
5418                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5419
5420                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5421
5422                 }
5423         }
5424 }
5425
5426 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5427 {
5428         u32 val;
5429         int err;
5430
5431         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5432                 err = tg3_setup_fiber_phy(tp, force_reset);
5433         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5434                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5435         else
5436                 err = tg3_setup_copper_phy(tp, force_reset);
5437
5438         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5439                 u32 scale;
5440
5441                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5442                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5443                         scale = 65;
5444                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5445                         scale = 6;
5446                 else
5447                         scale = 12;
5448
5449                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5450                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5451                 tw32(GRC_MISC_CFG, val);
5452         }
5453
5454         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5455               (6 << TX_LENGTHS_IPG_SHIFT);
5456         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5457                 val |= tr32(MAC_TX_LENGTHS) &
5458                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5459                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5460
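        /* Half-duplex gigabit gets the extended slot time (0xff,
         * presumably the 4096-bit-time slot 802.3 requires for carrier
         * extension); everything else uses the standard value of 32.
         */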
5461         if (tp->link_config.active_speed == SPEED_1000 &&
5462             tp->link_config.active_duplex == DUPLEX_HALF)
5463                 tw32(MAC_TX_LENGTHS, val |
5464                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5465         else
5466                 tw32(MAC_TX_LENGTHS, val |
5467                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5468
5469         if (!tg3_flag(tp, 5705_PLUS)) {
5470                 if (netif_carrier_ok(tp->dev)) {
5471                         tw32(HOSTCC_STAT_COAL_TICKS,
5472                              tp->coal.stats_block_coalesce_usecs);
5473                 } else {
5474                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5475                 }
5476         }
5477
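        /* ASPM workaround: program the tuned PCIe L1 entry threshold
         * while the link is down, and the maximum threshold while it
         * is up.
         */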
5478         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5479                 val = tr32(PCIE_PWR_MGMT_THRESH);
5480                 if (!netif_carrier_ok(tp->dev))
5481                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5482                               tp->pwrmgmt_thresh;
5483                 else
5484                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5485                 tw32(PCIE_PWR_MGMT_THRESH, val);
5486         }
5487
5488         return err;
5489 }
5490
5491 static inline int tg3_irq_sync(struct tg3 *tp)
5492 {
5493         return tp->irq_sync;
5494 }
5495
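/* Read a block of registers into the dump buffer.  dst is advanced by
 * off first, so each register lands at its natural offset within the
 * dump and the result mirrors the register map.
 */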
5496 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5497 {
5498         int i;
5499
5500         dst = (u32 *)((u8 *)dst + off);
5501         for (i = 0; i < len; i += sizeof(u32))
5502                 *dst++ = tr32(off + i);
5503 }
5504
5505 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5506 {
5507         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5508         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5509         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5510         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5511         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5512         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5513         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5514         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5515         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5516         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5517         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5518         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5519         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5520         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5521         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5522         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5523         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5524         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5525         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5526
5527         if (tg3_flag(tp, SUPPORT_MSIX))
5528                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5529
5530         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5531         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5532         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5533         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5534         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5535         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5536         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5537         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5538
5539         if (!tg3_flag(tp, 5705_PLUS)) {
5540                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5541                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5542                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5543         }
5544
5545         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5546         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5547         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5548         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5549         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5550
5551         if (tg3_flag(tp, NVRAM))
5552                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5553 }
5554
5555 static void tg3_dump_state(struct tg3 *tp)
5556 {
5557         int i;
5558         u32 *regs;
5559
5560         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5561         if (!regs) {
5562                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5563                 return;
5564         }
5565
5566         if (tg3_flag(tp, PCI_EXPRESS)) {
5567                 /* Read up to but not including private PCI registers */
5568                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5569                         regs[i / sizeof(u32)] = tr32(i);
5570         } else
5571                 tg3_dump_legacy_regs(tp, regs);
5572
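        /* Print four registers per line, skipping rows that are
         * entirely zero.
         */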
5573         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5574                 if (!regs[i + 0] && !regs[i + 1] &&
5575                     !regs[i + 2] && !regs[i + 3])
5576                         continue;
5577
5578                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5579                            i * 4,
5580                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5581         }
5582
5583         kfree(regs);
5584
5585         for (i = 0; i < tp->irq_cnt; i++) {
5586                 struct tg3_napi *tnapi = &tp->napi[i];
5587
5588                 /* SW status block */
5589                 netdev_err(tp->dev,
5590                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5591                            i,
5592                            tnapi->hw_status->status,
5593                            tnapi->hw_status->status_tag,
5594                            tnapi->hw_status->rx_jumbo_consumer,
5595                            tnapi->hw_status->rx_consumer,
5596                            tnapi->hw_status->rx_mini_consumer,
5597                            tnapi->hw_status->idx[0].rx_producer,
5598                            tnapi->hw_status->idx[0].tx_consumer);
5599
5600                 netdev_err(tp->dev,
5601                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5602                            i,
5603                            tnapi->last_tag, tnapi->last_irq_tag,
5604                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5605                            tnapi->rx_rcb_ptr,
5606                            tnapi->prodring.rx_std_prod_idx,
5607                            tnapi->prodring.rx_std_cons_idx,
5608                            tnapi->prodring.rx_jmb_prod_idx,
5609                            tnapi->prodring.rx_jmb_cons_idx);
5610         }
5611 }
5612
5613 /* This is called whenever we suspect that the system chipset is re-
5614  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5615  * is bogus tx completions. We try to recover by setting the
5616  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5617  * in the workqueue.
5618  */
5619 static void tg3_tx_recover(struct tg3 *tp)
5620 {
5621         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5622                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5623
5624         netdev_warn(tp->dev,
5625                     "The system may be re-ordering memory-mapped I/O "
5626                     "cycles to the network device, attempting to recover. "
5627                     "Please report the problem to the driver maintainer "
5628                     "and include system chipset information.\n");
5629
5630         spin_lock(&tp->lock);
5631         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5632         spin_unlock(&tp->lock);
5633 }
5634
5635 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5636 {
5637         /* Tell compiler to fetch tx indices from memory. */
5638         barrier();
5639         return tnapi->tx_pending -
5640                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5641 }
5642
5643 /* Tigon3 never reports partial packet sends.  So we do not
5644  * need special logic to handle SKBs that have not had all
5645  * of their frags sent yet, like SunGEM does.
5646  */
5647 static void tg3_tx(struct tg3_napi *tnapi)
5648 {
5649         struct tg3 *tp = tnapi->tp;
5650         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5651         u32 sw_idx = tnapi->tx_cons;
5652         struct netdev_queue *txq;
5653         int index = tnapi - tp->napi;
5654         unsigned int pkts_compl = 0, bytes_compl = 0;
5655
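        /* With TSS, tx rings evidently start at the second NAPI
         * context, so shift the index down by one to get the tx
         * queue number.
         */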
5656         if (tg3_flag(tp, ENABLE_TSS))
5657                 index--;
5658
5659         txq = netdev_get_tx_queue(tp->dev, index);
5660
5661         while (sw_idx != hw_idx) {
5662                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5663                 struct sk_buff *skb = ri->skb;
5664                 int i, tx_bug = 0;
5665
5666                 if (unlikely(skb == NULL)) {
5667                         tg3_tx_recover(tp);
5668                         return;
5669                 }
5670
5671                 pci_unmap_single(tp->pdev,
5672                                  dma_unmap_addr(ri, mapping),
5673                                  skb_headlen(skb),
5674                                  PCI_DMA_TODEVICE);
5675
5676                 ri->skb = NULL;
5677
5678                 while (ri->fragmented) {
5679                         ri->fragmented = false;
5680                         sw_idx = NEXT_TX(sw_idx);
5681                         ri = &tnapi->tx_buffers[sw_idx];
5682                 }
5683
5684                 sw_idx = NEXT_TX(sw_idx);
5685
5686                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5687                         ri = &tnapi->tx_buffers[sw_idx];
5688                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5689                                 tx_bug = 1;
5690
5691                         pci_unmap_page(tp->pdev,
5692                                        dma_unmap_addr(ri, mapping),
5693                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5694                                        PCI_DMA_TODEVICE);
5695
5696                         while (ri->fragmented) {
5697                                 ri->fragmented = false;
5698                                 sw_idx = NEXT_TX(sw_idx);
5699                                 ri = &tnapi->tx_buffers[sw_idx];
5700                         }
5701
5702                         sw_idx = NEXT_TX(sw_idx);
5703                 }
5704
5705                 pkts_compl++;
5706                 bytes_compl += skb->len;
5707
5708                 dev_kfree_skb(skb);
5709
5710                 if (unlikely(tx_bug)) {
5711                         tg3_tx_recover(tp);
5712                         return;
5713                 }
5714         }
5715
5716         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5717
5718         tnapi->tx_cons = sw_idx;
5719
5720         /* Need to make the tx_cons update visible to tg3_start_xmit()
5721          * before checking for netif_queue_stopped().  Without the
5722          * memory barrier, there is a small possibility that tg3_start_xmit()
5723          * will miss it and cause the queue to be stopped forever.
5724          */
5725         smp_mb();
5726
5727         if (unlikely(netif_tx_queue_stopped(txq) &&
5728                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5729                 __netif_tx_lock(txq, smp_processor_id());
5730                 if (netif_tx_queue_stopped(txq) &&
5731                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5732                         netif_tx_wake_queue(txq);
5733                 __netif_tx_unlock(txq);
5734         }
5735 }
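
/* Sketch of the producer side of the stop/wake protocol above, assuming
 * tg3_start_xmit() mirrors this ordering (stop first, then re-check):
 *
 *	netif_tx_stop_queue(txq);
 *	smp_mb();		<-- pairs with the smp_mb() after tx_cons update
 *	if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *		netif_tx_wake_queue(txq);	<-- completion raced with us
 */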
5736
5737 static void tg3_frag_free(bool is_frag, void *data)
5738 {
5739         if (is_frag)
5740                 put_page(virt_to_head_page(data));
5741         else
5742                 kfree(data);
5743 }
5744
5745 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5746 {
5747         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5748                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5749
5750         if (!ri->data)
5751                 return;
5752
5753         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5754                          map_sz, PCI_DMA_FROMDEVICE);
5755         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5756         ri->data = NULL;
5757 }
5758
5759
5760 /* Returns size of skb allocated or < 0 on error.
5761  *
5762  * We only need to fill in the address because the other members
5763  * of the RX descriptor are invariant, see tg3_init_rings.
5764  *
5765  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5766  * posting buffers we only dirty the first cache line of the RX
5767  * descriptor (containing the address).  Whereas for the RX status
5768  * buffers the cpu only reads the last cacheline of the RX descriptor
5769  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5770  */
5771 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5772                              u32 opaque_key, u32 dest_idx_unmasked,
5773                              unsigned int *frag_size)
5774 {
5775         struct tg3_rx_buffer_desc *desc;
5776         struct ring_info *map;
5777         u8 *data;
5778         dma_addr_t mapping;
5779         int skb_size, data_size, dest_idx;
5780
5781         switch (opaque_key) {
5782         case RXD_OPAQUE_RING_STD:
5783                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5784                 desc = &tpr->rx_std[dest_idx];
5785                 map = &tpr->rx_std_buffers[dest_idx];
5786                 data_size = tp->rx_pkt_map_sz;
5787                 break;
5788
5789         case RXD_OPAQUE_RING_JUMBO:
5790                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5791                 desc = &tpr->rx_jmb[dest_idx].std;
5792                 map = &tpr->rx_jmb_buffers[dest_idx];
5793                 data_size = TG3_RX_JMB_MAP_SZ;
5794                 break;
5795
5796         default:
5797                 return -EINVAL;
5798         }
5799
5800         /* Do not overwrite any of the map or rp information
5801          * until we are sure we can commit to a new buffer.
5802          *
5803          * Callers depend upon this behavior and assume that
5804          * we leave everything unchanged if we fail.
5805          */
5806         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5807                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5808         if (skb_size <= PAGE_SIZE) {
5809                 data = netdev_alloc_frag(skb_size);
5810                 *frag_size = skb_size;
5811         } else {
5812                 data = kmalloc(skb_size, GFP_ATOMIC);
5813                 *frag_size = 0;
5814         }
5815         if (!data)
5816                 return -ENOMEM;
5817
5818         mapping = pci_map_single(tp->pdev,
5819                                  data + TG3_RX_OFFSET(tp),
5820                                  data_size,
5821                                  PCI_DMA_FROMDEVICE);
5822         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5823                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5824                 return -EIO;
5825         }
5826
5827         map->data = data;
5828         dma_unmap_addr_set(map, mapping, mapping);
5829
5830         desc->addr_hi = ((u64)mapping >> 32);
5831         desc->addr_lo = ((u64)mapping & 0xffffffff);
5832
5833         return data_size;
5834 }
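
/* Worked example of the allocation strategy above, assuming a 4096-byte
 * PAGE_SIZE and a standard-ring rx_pkt_map_sz of roughly 1.5KB: the
 * aligned data size plus the aligned struct skb_shared_info stays under
 * PAGE_SIZE, so the cheap netdev_alloc_frag() path is taken and
 * *frag_size is recorded for a later build_skb().  A jumbo buffer of
 * TG3_RX_JMB_MAP_SZ (~9KB) exceeds PAGE_SIZE and falls back to
 * kmalloc(GFP_ATOMIC), signalled by *frag_size == 0.
 */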
5835
5836 /* We only need to move over in the address because the other
5837  * members of the RX descriptor are invariant.  See notes above
5838  * tg3_alloc_rx_data for full details.
5839  */
5840 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5841                            struct tg3_rx_prodring_set *dpr,
5842                            u32 opaque_key, int src_idx,
5843                            u32 dest_idx_unmasked)
5844 {
5845         struct tg3 *tp = tnapi->tp;
5846         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5847         struct ring_info *src_map, *dest_map;
5848         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5849         int dest_idx;
5850
5851         switch (opaque_key) {
5852         case RXD_OPAQUE_RING_STD:
5853                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5854                 dest_desc = &dpr->rx_std[dest_idx];
5855                 dest_map = &dpr->rx_std_buffers[dest_idx];
5856                 src_desc = &spr->rx_std[src_idx];
5857                 src_map = &spr->rx_std_buffers[src_idx];
5858                 break;
5859
5860         case RXD_OPAQUE_RING_JUMBO:
5861                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5862                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5863                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5864                 src_desc = &spr->rx_jmb[src_idx].std;
5865                 src_map = &spr->rx_jmb_buffers[src_idx];
5866                 break;
5867
5868         default:
5869                 return;
5870         }
5871
5872         dest_map->data = src_map->data;
5873         dma_unmap_addr_set(dest_map, mapping,
5874                            dma_unmap_addr(src_map, mapping));
5875         dest_desc->addr_hi = src_desc->addr_hi;
5876         dest_desc->addr_lo = src_desc->addr_lo;
5877
5878         /* Ensure that the update to the skb happens after the physical
5879          * addresses have been transferred to the new BD location.
5880          */
5881         smp_wmb();
5882
5883         src_map->data = NULL;
5884 }
5885
5886 /* The RX ring scheme is composed of multiple rings which post fresh
5887  * buffers to the chip, and one special ring the chip uses to report
5888  * status back to the host.
5889  *
5890  * The special ring reports the status of received packets to the
5891  * host.  The chip does not write into the original descriptor the
5892  * RX buffer was obtained from.  The chip simply takes the original
5893  * descriptor as provided by the host, updates the status and length
5894  * field, then writes this into the next status ring entry.
5895  *
5896  * Each ring the host uses to post buffers to the chip is described
5897  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5898  * it is first placed into the on-chip RAM.  Once the packet's length
5899  * is known, the chip walks down the TG3_BDINFO entries to select a ring:
5900  * each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5901  * whose MAXLEN covers the new packet's length is chosen.
5902  *
5903  * The "separate ring for rx status" scheme may sound queer, but it makes
5904  * sense from a cache coherency perspective.  If only the host writes
5905  * to the buffer post rings, and only the chip writes to the rx status
5906  * rings, then cache lines never move beyond shared-modified state.
5907  * If both the host and chip were to write into the same ring, cache line
5908  * eviction could occur since both entities want it in an exclusive state.
5909  */
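/* Illustrative example of the MAXLEN walk described above, assuming a
 * standard ring with MAXLEN 1536 and a jumbo ring with MAXLEN of ~9KB:
 * a 1200-byte packet fits the first TG3_BDINFO and lands in a standard
 * buffer, while a 4000-byte packet skips it and lands in the jumbo
 * ring.  The opaque cookie the chip echoes back in the status entry
 * (ring type plus buffer index) is how tg3_rx() below locates the
 * original buffer.
 */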
5910 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5911 {
5912         struct tg3 *tp = tnapi->tp;
5913         u32 work_mask, rx_std_posted = 0;
5914         u32 std_prod_idx, jmb_prod_idx;
5915         u32 sw_idx = tnapi->rx_rcb_ptr;
5916         u16 hw_idx;
5917         int received;
5918         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5919
5920         hw_idx = *(tnapi->rx_rcb_prod_idx);
5921         /*
5922          * We need to order the read of hw_idx and the read of
5923          * the opaque cookie.
5924          */
5925         rmb();
5926         work_mask = 0;
5927         received = 0;
5928         std_prod_idx = tpr->rx_std_prod_idx;
5929         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5930         while (sw_idx != hw_idx && budget > 0) {
5931                 struct ring_info *ri;
5932                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5933                 unsigned int len;
5934                 struct sk_buff *skb;
5935                 dma_addr_t dma_addr;
5936                 u32 opaque_key, desc_idx, *post_ptr;
5937                 u8 *data;
5938
5939                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5940                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5941                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5942                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5943                         dma_addr = dma_unmap_addr(ri, mapping);
5944                         data = ri->data;
5945                         post_ptr = &std_prod_idx;
5946                         rx_std_posted++;
5947                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5948                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5949                         dma_addr = dma_unmap_addr(ri, mapping);
5950                         data = ri->data;
5951                         post_ptr = &jmb_prod_idx;
5952                 } else
5953                         goto next_pkt_nopost;
5954
5955                 work_mask |= opaque_key;
5956
5957                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5958                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5959                 drop_it:
5960                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5961                                        desc_idx, *post_ptr);
5962                 drop_it_no_recycle:
5963                         /* The card keeps track of other statistics. */
5964                         tp->rx_dropped++;
5965                         goto next_pkt;
5966                 }
5967
5968                 prefetch(data + TG3_RX_OFFSET(tp));
5969                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5970                       ETH_FCS_LEN;
5971
5972                 if (len > TG3_RX_COPY_THRESH(tp)) {
5973                         int skb_size;
5974                         unsigned int frag_size;
5975
5976                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5977                                                     *post_ptr, &frag_size);
5978                         if (skb_size < 0)
5979                                 goto drop_it;
5980
5981                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5982                                          PCI_DMA_FROMDEVICE);
5983
5984                         skb = build_skb(data, frag_size);
5985                         if (!skb) {
5986                                 tg3_frag_free(frag_size != 0, data);
5987                                 goto drop_it_no_recycle;
5988                         }
5989                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5990                         /* Ensure that the update to the data happens
5991                          * after the usage of the old DMA mapping.
5992                          */
5993                         smp_wmb();
5994
5995                         ri->data = NULL;
5996
5997                 } else {
5998                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5999                                        desc_idx, *post_ptr);
6000
6001                         skb = netdev_alloc_skb(tp->dev,
6002                                                len + TG3_RAW_IP_ALIGN);
6003                         if (skb == NULL)
6004                                 goto drop_it_no_recycle;
6005
6006                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6007                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
                                                         PCI_DMA_FROMDEVICE);
6008                         memcpy(skb->data,
6009                                data + TG3_RX_OFFSET(tp),
6010                                len);
6011                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len,
                                                            PCI_DMA_FROMDEVICE);
6012                 }
6013
6014                 skb_put(skb, len);
6015                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6016                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6017                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6018                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6019                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6020                 else
6021                         skb_checksum_none_assert(skb);
6022
6023                 skb->protocol = eth_type_trans(skb, tp->dev);
6024
6025                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6026                     skb->protocol != htons(ETH_P_8021Q)) {
6027                         dev_kfree_skb(skb);
6028                         goto drop_it_no_recycle;
6029                 }
6030
6031                 if (desc->type_flags & RXD_FLAG_VLAN &&
6032                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6033                         __vlan_hwaccel_put_tag(skb,
6034                                                desc->err_vlan & RXD_VLAN_MASK);
6035
6036                 napi_gro_receive(&tnapi->napi, skb);
6037
6038                 received++;
6039                 budget--;
6040
6041 next_pkt:
6042                 (*post_ptr)++;
6043
6044                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6045                         tpr->rx_std_prod_idx = std_prod_idx &
6046                                                tp->rx_std_ring_mask;
6047                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6048                                      tpr->rx_std_prod_idx);
6049                         work_mask &= ~RXD_OPAQUE_RING_STD;
6050                         rx_std_posted = 0;
6051                 }
6052 next_pkt_nopost:
6053                 sw_idx++;
6054                 sw_idx &= tp->rx_ret_ring_mask;
6055
6056                 /* Refresh hw_idx to see if there is new work */
6057                 if (sw_idx == hw_idx) {
6058                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6059                         rmb();
6060                 }
6061         }
6062
6063         /* ACK the status ring. */
6064         tnapi->rx_rcb_ptr = sw_idx;
6065         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6066
6067         /* Refill RX ring(s). */
6068         if (!tg3_flag(tp, ENABLE_RSS)) {
6069                 /* Sync BD data before updating mailbox */
6070                 wmb();
6071
6072                 if (work_mask & RXD_OPAQUE_RING_STD) {
6073                         tpr->rx_std_prod_idx = std_prod_idx &
6074                                                tp->rx_std_ring_mask;
6075                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6076                                      tpr->rx_std_prod_idx);
6077                 }
6078                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6079                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6080                                                tp->rx_jmb_ring_mask;
6081                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6082                                      tpr->rx_jmb_prod_idx);
6083                 }
6084                 mmiowb();
6085         } else if (work_mask) {
6086                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6087                  * updated before the producer indices can be updated.
6088                  */
6089                 smp_wmb();
6090
6091                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6092                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6093
6094                 if (tnapi != &tp->napi[1]) {
6095                         tp->rx_refill = true;
6096                         napi_schedule(&tp->napi[1].napi);
6097                 }
6098         }
6099
6100         return received;
6101 }
6102
6103 static void tg3_poll_link(struct tg3 *tp)
6104 {
6105         /* handle link change and other phy events */
6106         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6107                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6108
6109                 if (sblk->status & SD_STATUS_LINK_CHG) {
6110                         sblk->status = SD_STATUS_UPDATED |
6111                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6112                         spin_lock(&tp->lock);
6113                         if (tg3_flag(tp, USE_PHYLIB)) {
6114                                 tw32_f(MAC_STATUS,
6115                                      (MAC_STATUS_SYNC_CHANGED |
6116                                       MAC_STATUS_CFG_CHANGED |
6117                                       MAC_STATUS_MI_COMPLETION |
6118                                       MAC_STATUS_LNKSTATE_CHANGED));
6119                                 udelay(40);
6120                         } else
6121                                 tg3_setup_phy(tp, 0);
6122                         spin_unlock(&tp->lock);
6123                 }
6124         }
6125 }
6126
6127 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6128                                 struct tg3_rx_prodring_set *dpr,
6129                                 struct tg3_rx_prodring_set *spr)
6130 {
6131         u32 si, di, cpycnt, src_prod_idx;
6132         int i, err = 0;
6133
6134         while (1) {
6135                 src_prod_idx = spr->rx_std_prod_idx;
6136
6137                 /* Make sure updates to the rx_std_buffers[] entries and the
6138                  * standard producer index are seen in the correct order.
6139                  */
6140                 smp_rmb();
6141
6142                 if (spr->rx_std_cons_idx == src_prod_idx)
6143                         break;
6144
6145                 if (spr->rx_std_cons_idx < src_prod_idx)
6146                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6147                 else
6148                         cpycnt = tp->rx_std_ring_mask + 1 -
6149                                  spr->rx_std_cons_idx;
6150
6151                 cpycnt = min(cpycnt,
6152                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6153
6154                 si = spr->rx_std_cons_idx;
6155                 di = dpr->rx_std_prod_idx;
6156
6157                 for (i = di; i < di + cpycnt; i++) {
6158                         if (dpr->rx_std_buffers[i].data) {
6159                                 cpycnt = i - di;
6160                                 err = -ENOSPC;
6161                                 break;
6162                         }
6163                 }
6164
6165                 if (!cpycnt)
6166                         break;
6167
6168                 /* Ensure that updates to the rx_std_buffers ring and the
6169                  * shadowed hardware producer ring from tg3_recycle_skb() are
6170                  * ordered correctly WRT the skb check above.
6171                  */
6172                 smp_rmb();
6173
6174                 memcpy(&dpr->rx_std_buffers[di],
6175                        &spr->rx_std_buffers[si],
6176                        cpycnt * sizeof(struct ring_info));
6177
6178                 for (i = 0; i < cpycnt; i++, di++, si++) {
6179                         struct tg3_rx_buffer_desc *sbd, *dbd;
6180                         sbd = &spr->rx_std[si];
6181                         dbd = &dpr->rx_std[di];
6182                         dbd->addr_hi = sbd->addr_hi;
6183                         dbd->addr_lo = sbd->addr_lo;
6184                 }
6185
6186                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6187                                        tp->rx_std_ring_mask;
6188                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6189                                        tp->rx_std_ring_mask;
6190         }
6191
6192         while (1) {
6193                 src_prod_idx = spr->rx_jmb_prod_idx;
6194
6195                 /* Make sure updates to the rx_jmb_buffers[] entries and
6196                  * the jumbo producer index are seen in the correct order.
6197                  */
6198                 smp_rmb();
6199
6200                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6201                         break;
6202
6203                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6204                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6205                 else
6206                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6207                                  spr->rx_jmb_cons_idx;
6208
6209                 cpycnt = min(cpycnt,
6210                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6211
6212                 si = spr->rx_jmb_cons_idx;
6213                 di = dpr->rx_jmb_prod_idx;
6214
6215                 for (i = di; i < di + cpycnt; i++) {
6216                         if (dpr->rx_jmb_buffers[i].data) {
6217                                 cpycnt = i - di;
6218                                 err = -ENOSPC;
6219                                 break;
6220                         }
6221                 }
6222
6223                 if (!cpycnt)
6224                         break;
6225
6226                 /* Ensure that updates to the rx_jmb_buffers ring and the
6227                  * shadowed hardware producer ring from tg3_recycle_skb() are
6228                  * ordered correctly WRT the skb check above.
6229                  */
6230                 smp_rmb();
6231
6232                 memcpy(&dpr->rx_jmb_buffers[di],
6233                        &spr->rx_jmb_buffers[si],
6234                        cpycnt * sizeof(struct ring_info));
6235
6236                 for (i = 0; i < cpycnt; i++, di++, si++) {
6237                         struct tg3_rx_buffer_desc *sbd, *dbd;
6238                         sbd = &spr->rx_jmb[si].std;
6239                         dbd = &dpr->rx_jmb[di].std;
6240                         dbd->addr_hi = sbd->addr_hi;
6241                         dbd->addr_lo = sbd->addr_lo;
6242                 }
6243
6244                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6245                                        tp->rx_jmb_ring_mask;
6246                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6247                                        tp->rx_jmb_ring_mask;
6248         }
6249
6250         return err;
6251 }
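
/* Worked example of the chunked copy above, assuming a 512-entry ring
 * (rx_std_ring_mask == 511): with spr->rx_std_cons_idx == 500 and
 * src_prod_idx == 20, the first pass copies 512 - 500 == 12 entries up
 * to the wrap point and the second pass copies the remaining 20.  Each
 * pass is additionally clipped by the space ahead of
 * dpr->rx_std_prod_idx and stops early with -ENOSPC at any destination
 * slot whose ->data is still occupied.
 */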
6252
6253 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6254 {
6255         struct tg3 *tp = tnapi->tp;
6256
6257         /* run TX completion thread */
6258         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6259                 tg3_tx(tnapi);
6260                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6261                         return work_done;
6262         }
6263
6264         if (!tnapi->rx_rcb_prod_idx)
6265                 return work_done;
6266
6267         /* run RX thread, within the bounds set by NAPI.
6268          * All RX "locking" is done by ensuring outside
6269          * code synchronizes with tg3->napi.poll()
6270          */
6271         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6272                 work_done += tg3_rx(tnapi, budget - work_done);
6273
6274         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6275                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6276                 int i, err = 0;
6277                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6278                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6279
6280                 tp->rx_refill = false;
6281                 for (i = 1; i < tp->irq_cnt; i++)
6282                         err |= tg3_rx_prodring_xfer(tp, dpr,
6283                                                     &tp->napi[i].prodring);
6284
6285                 wmb();
6286
6287                 if (std_prod_idx != dpr->rx_std_prod_idx)
6288                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6289                                      dpr->rx_std_prod_idx);
6290
6291                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6292                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6293                                      dpr->rx_jmb_prod_idx);
6294
6295                 mmiowb();
6296
6297                 if (err)
6298                         tw32_f(HOSTCC_MODE, tp->coal_now);
6299         }
6300
6301         return work_done;
6302 }
6303
6304 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6305 {
6306         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6307                 schedule_work(&tp->reset_task);
6308 }
6309
6310 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6311 {
6312         cancel_work_sync(&tp->reset_task);
6313         tg3_flag_clear(tp, RESET_TASK_PENDING);
6314         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6315 }
6316
6317 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6318 {
6319         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6320         struct tg3 *tp = tnapi->tp;
6321         int work_done = 0;
6322         struct tg3_hw_status *sblk = tnapi->hw_status;
6323
6324         while (1) {
6325                 work_done = tg3_poll_work(tnapi, work_done, budget);
6326
6327                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6328                         goto tx_recovery;
6329
6330                 if (unlikely(work_done >= budget))
6331                         break;
6332
6333                 /* tp->last_tag is used in tg3_int_reenable() below
6334                  * to tell the hw how much work has been processed,
6335                  * so we must read it before checking for more work.
6336                  */
6337                 tnapi->last_tag = sblk->status_tag;
6338                 tnapi->last_irq_tag = tnapi->last_tag;
6339                 rmb();
6340
6341                 /* check for RX/TX work to do */
6342                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6343                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6344
6345                         /* This test here is not race free, but will reduce
6346                          * the number of interrupts by looping again.
6347                          */
6348                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6349                                 continue;
6350
6351                         napi_complete(napi);
6352                         /* Reenable interrupts. */
6353                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6354
6355                         /* This test here is synchronized by napi_schedule()
6356                          * and napi_complete() to close the race condition.
6357                          */
6358                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6359                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6360                                                   HOSTCC_MODE_ENABLE |
6361                                                   tnapi->coal_now);
6362                         }
6363                         mmiowb();
6364                         break;
6365                 }
6366         }
6367
6368         return work_done;
6369
6370 tx_recovery:
6371         /* work_done is guaranteed to be less than budget. */
6372         napi_complete(napi);
6373         tg3_reset_task_schedule(tp);
6374         return work_done;
6375 }
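
/* Sketch of the tagged-status handshake used above, assuming the chip
 * interprets the tag written to the mailbox as "work processed up to
 * here" and only raises a new interrupt for a status block carrying a
 * newer tag:
 *
 *	tnapi->last_tag = sblk->status_tag;	<-- snapshot before re-check
 *	rmb();					<-- tag read before ring reads
 *	(re-check tx/rx indices; if idle:)
 *	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
 */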
6376
6377 static void tg3_process_error(struct tg3 *tp)
6378 {
6379         u32 val;
6380         bool real_error = false;
6381
6382         if (tg3_flag(tp, ERROR_PROCESSED))
6383                 return;
6384
6385         /* Check Flow Attention register */
6386         val = tr32(HOSTCC_FLOW_ATTN);
6387         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6388                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6389                 real_error = true;
6390         }
6391
6392         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6393                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6394                 real_error = true;
6395         }
6396
6397         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6398                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6399                 real_error = true;
6400         }
6401
6402         if (!real_error)
6403                 return;
6404
6405         tg3_dump_state(tp);
6406
6407         tg3_flag_set(tp, ERROR_PROCESSED);
6408         tg3_reset_task_schedule(tp);
6409 }
6410
6411 static int tg3_poll(struct napi_struct *napi, int budget)
6412 {
6413         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6414         struct tg3 *tp = tnapi->tp;
6415         int work_done = 0;
6416         struct tg3_hw_status *sblk = tnapi->hw_status;
6417
6418         while (1) {
6419                 if (sblk->status & SD_STATUS_ERROR)
6420                         tg3_process_error(tp);
6421
6422                 tg3_poll_link(tp);
6423
6424                 work_done = tg3_poll_work(tnapi, work_done, budget);
6425
6426                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6427                         goto tx_recovery;
6428
6429                 if (unlikely(work_done >= budget))
6430                         break;
6431
6432                 if (tg3_flag(tp, TAGGED_STATUS)) {
6433                         /* tp->last_tag is used in tg3_int_reenable() below
6434                          * to tell the hw how much work has been processed,
6435                          * so we must read it before checking for more work.
6436                          */
6437                         tnapi->last_tag = sblk->status_tag;
6438                         tnapi->last_irq_tag = tnapi->last_tag;
6439                         rmb();
6440                 } else
6441                         sblk->status &= ~SD_STATUS_UPDATED;
6442
6443                 if (likely(!tg3_has_work(tnapi))) {
6444                         napi_complete(napi);
6445                         tg3_int_reenable(tnapi);
6446                         break;
6447                 }
6448         }
6449
6450         return work_done;
6451
6452 tx_recovery:
6453         /* work_done is guaranteed to be less than budget. */
6454         napi_complete(napi);
6455         tg3_reset_task_schedule(tp);
6456         return work_done;
6457 }
6458
6459 static void tg3_napi_disable(struct tg3 *tp)
6460 {
6461         int i;
6462
6463         for (i = tp->irq_cnt - 1; i >= 0; i--)
6464                 napi_disable(&tp->napi[i].napi);
6465 }
6466
6467 static void tg3_napi_enable(struct tg3 *tp)
6468 {
6469         int i;
6470
6471         for (i = 0; i < tp->irq_cnt; i++)
6472                 napi_enable(&tp->napi[i].napi);
6473 }
6474
6475 static void tg3_napi_init(struct tg3 *tp)
6476 {
6477         int i;
6478
6479         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6480         for (i = 1; i < tp->irq_cnt; i++)
6481                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6482 }
6483
6484 static void tg3_napi_fini(struct tg3 *tp)
6485 {
6486         int i;
6487
6488         for (i = 0; i < tp->irq_cnt; i++)
6489                 netif_napi_del(&tp->napi[i].napi);
6490 }
6491
6492 static inline void tg3_netif_stop(struct tg3 *tp)
6493 {
6494         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6495         tg3_napi_disable(tp);
6496         netif_tx_disable(tp->dev);
6497 }
6498
6499 static inline void tg3_netif_start(struct tg3 *tp)
6500 {
6501         /* NOTE: unconditional netif_tx_wake_all_queues is only
6502          * appropriate so long as all callers are assured to
6503          * have free tx slots (such as after tg3_init_hw)
6504          */
6505         netif_tx_wake_all_queues(tp->dev);
6506
6507         tg3_napi_enable(tp);
6508         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6509         tg3_enable_ints(tp);
6510 }
6511
6512 static void tg3_irq_quiesce(struct tg3 *tp)
6513 {
6514         int i;
6515
6516         BUG_ON(tp->irq_sync);
6517
6518         tp->irq_sync = 1;
6519         smp_mb();
6520
6521         for (i = 0; i < tp->irq_cnt; i++)
6522                 synchronize_irq(tp->napi[i].irq_vec);
6523 }
6524
6525 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6526  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6527  * with as well.  Most of the time, this is not necessary except when
6528  * shutting down the device.
6529  */
6530 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6531 {
6532         spin_lock_bh(&tp->lock);
6533         if (irq_sync)
6534                 tg3_irq_quiesce(tp);
6535 }
6536
6537 static inline void tg3_full_unlock(struct tg3 *tp)
6538 {
6539         spin_unlock_bh(&tp->lock);
6540 }
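
/* Typical usage (sketch): a reconfiguration path that must also quiesce
 * the interrupt handlers passes a non-zero irq_sync, e.g.
 *
 *	tg3_full_lock(tp, 1);
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	tg3_full_unlock(tp);
 *
 * tg3_halt() and RESET_KIND_SHUTDOWN serve purely as an example of work
 * that requires the fully quiesced state.
 */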
6541
6542 /* One-shot MSI handler - Chip automatically disables interrupt
6543  * after sending MSI so driver doesn't have to do it.
6544  */
6545 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6546 {
6547         struct tg3_napi *tnapi = dev_id;
6548         struct tg3 *tp = tnapi->tp;
6549
6550         prefetch(tnapi->hw_status);
6551         if (tnapi->rx_rcb)
6552                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6553
6554         if (likely(!tg3_irq_sync(tp)))
6555                 napi_schedule(&tnapi->napi);
6556
6557         return IRQ_HANDLED;
6558 }
6559
6560 /* MSI ISR - No need to check for interrupt sharing and no need to
6561  * flush status block and interrupt mailbox. PCI ordering rules
6562  * guarantee that MSI will arrive after the status block.
6563  */
6564 static irqreturn_t tg3_msi(int irq, void *dev_id)
6565 {
6566         struct tg3_napi *tnapi = dev_id;
6567         struct tg3 *tp = tnapi->tp;
6568
6569         prefetch(tnapi->hw_status);
6570         if (tnapi->rx_rcb)
6571                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6572         /*
6573          * Writing any value to intr-mbox-0 clears PCI INTA# and
6574          * chip-internal interrupt pending events.
6575          * Writing non-zero to intr-mbox-0 additionally tells the
6576          * NIC to stop sending us irqs, engaging "in-intr-handler"
6577          * event coalescing.
6578          */
6579         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6580         if (likely(!tg3_irq_sync(tp)))
6581                 napi_schedule(&tnapi->napi);
6582
6583         return IRQ_RETVAL(1);
6584 }
6585
6586 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6587 {
6588         struct tg3_napi *tnapi = dev_id;
6589         struct tg3 *tp = tnapi->tp;
6590         struct tg3_hw_status *sblk = tnapi->hw_status;
6591         unsigned int handled = 1;
6592
6593         /* In INTx mode, it is possible for the interrupt to arrive at
6594          * the CPU before the status block that was posted just before it.
6595          * Reading the PCI State register will confirm whether the
6596          * interrupt is ours and will flush the status block.
6597          */
6598         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6599                 if (tg3_flag(tp, CHIP_RESETTING) ||
6600                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6601                         handled = 0;
6602                         goto out;
6603                 }
6604         }
6605
6606         /*
6607          * Writing any value to intr-mbox-0 clears PCI INTA# and
6608          * chip-internal interrupt pending events.
6609          * Writing non-zero to intr-mbox-0 additionally tells the
6610          * NIC to stop sending us irqs, engaging "in-intr-handler"
6611          * event coalescing.
6612          *
6613          * Flush the mailbox to de-assert the IRQ immediately to prevent
6614          * spurious interrupts.  The flush impacts performance but
6615          * excessive spurious interrupts can be worse in some cases.
6616          */
6617         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6618         if (tg3_irq_sync(tp))
6619                 goto out;
6620         sblk->status &= ~SD_STATUS_UPDATED;
6621         if (likely(tg3_has_work(tnapi))) {
6622                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6623                 napi_schedule(&tnapi->napi);
6624         } else {
6625                 /* No work, shared interrupt perhaps?  re-enable
6626                  * interrupts, and flush that PCI write
6627                  */
6628                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6629                                0x00000000);
6630         }
6631 out:
6632         return IRQ_RETVAL(handled);
6633 }
6634
6635 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6636 {
6637         struct tg3_napi *tnapi = dev_id;
6638         struct tg3 *tp = tnapi->tp;
6639         struct tg3_hw_status *sblk = tnapi->hw_status;
6640         unsigned int handled = 1;
6641
6642         /* In INTx mode, it is possible for the interrupt to arrive at
6643          * the CPU before the status block that was posted just before it.
6644          * Reading the PCI State register will confirm whether the
6645          * interrupt is ours and will flush the status block.
6646          */
6647         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6648                 if (tg3_flag(tp, CHIP_RESETTING) ||
6649                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6650                         handled = 0;
6651                         goto out;
6652                 }
6653         }
6654
6655         /*
6656          * writing any value to intr-mbox-0 clears PCI INTA# and
6657          * chip-internal interrupt pending events.
6658          * writing non-zero to intr-mbox-0 additionally tells the
6659          * NIC to stop sending us irqs, engaging "in-intr-handler"
6660          * event coalescing.
6661          *
6662          * Flush the mailbox to de-assert the IRQ immediately to prevent
6663          * spurious interrupts.  The flush impacts performance but
6664          * excessive spurious interrupts can be worse in some cases.
6665          */
6666         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6667
6668         /*
6669          * In a shared interrupt configuration, sometimes other devices'
6670          * interrupts will scream.  We record the current status tag here
6671          * so that the above check can report that the screaming interrupts
6672          * are unhandled.  Eventually they will be silenced.
6673          */
6674         tnapi->last_irq_tag = sblk->status_tag;
6675
6676         if (tg3_irq_sync(tp))
6677                 goto out;
6678
6679         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6680
6681         napi_schedule(&tnapi->napi);
6682
6683 out:
6684         return IRQ_RETVAL(handled);
6685 }
6686
6687 /* ISR for interrupt test */
6688 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6689 {
6690         struct tg3_napi *tnapi = dev_id;
6691         struct tg3 *tp = tnapi->tp;
6692         struct tg3_hw_status *sblk = tnapi->hw_status;
6693
6694         if ((sblk->status & SD_STATUS_UPDATED) ||
6695             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6696                 tg3_disable_ints(tp);
6697                 return IRQ_RETVAL(1);
6698         }
6699         return IRQ_RETVAL(0);
6700 }
6701
6702 #ifdef CONFIG_NET_POLL_CONTROLLER
6703 static void tg3_poll_controller(struct net_device *dev)
6704 {
6705         int i;
6706         struct tg3 *tp = netdev_priv(dev);
6707
6708         for (i = 0; i < tp->irq_cnt; i++)
6709                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6710 }
6711 #endif
6712
6713 static void tg3_tx_timeout(struct net_device *dev)
6714 {
6715         struct tg3 *tp = netdev_priv(dev);
6716
6717         if (netif_msg_tx_err(tp)) {
6718                 netdev_err(dev, "transmit timed out, resetting\n");
6719                 tg3_dump_state(tp);
6720         }
6721
6722         tg3_reset_task_schedule(tp);
6723 }
6724
6725 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6726 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6727 {
6728         u32 base = (u32) mapping & 0xffffffff;
6729
6730         return (base > 0xffffdcc0) && (base + len + 8 < base);
6731 }
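
/* Worked example: a mapping at base == 0xfffff000 with len == 8192
 * gives base + len + 8 == 0x100001008, which truncates to 0x1008 in
 * 32 bits, i.e. less than base, so the buffer crosses the 4GB line and
 * the workaround path is taken.  The base > 0xffffdcc0 pre-check merely
 * skips buffers starting more than ~9KB (the largest possible frame
 * plus slack) below the boundary, which therefore cannot cross it.
 */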
6732
6733 /* Test for DMA addresses > 40-bit */
6734 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6735                                           int len)
6736 {
6737 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6738         if (tg3_flag(tp, 40BIT_DMA_BUG))
6739                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6740         return 0;
6741 #else
6742         return 0;
6743 #endif
6744 }
6745
6746 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6747                                  dma_addr_t mapping, u32 len, u32 flags,
6748                                  u32 mss, u32 vlan)
6749 {
6750         txbd->addr_hi = ((u64) mapping >> 32);
6751         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6752         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6753         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6754 }
6755
6756 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6757                             dma_addr_t map, u32 len, u32 flags,
6758                             u32 mss, u32 vlan)
6759 {
6760         struct tg3 *tp = tnapi->tp;
6761         bool hwbug = false;
6762
6763         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6764                 hwbug = true;
6765
6766         if (tg3_4g_overflow_test(map, len))
6767                 hwbug = true;
6768
6769         if (tg3_40bit_overflow_test(tp, map, len))
6770                 hwbug = true;
6771
6772         if (tp->dma_limit) {
6773                 u32 prvidx = *entry;
6774                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6775                 while (len > tp->dma_limit && *budget) {
6776                         u32 frag_len = tp->dma_limit;
6777                         len -= tp->dma_limit;
6778
6779                         /* Avoid the 8-byte DMA problem */
6780                         if (len <= 8) {
6781                                 len += tp->dma_limit / 2;
6782                                 frag_len = tp->dma_limit / 2;
6783                         }
6784
6785                         tnapi->tx_buffers[*entry].fragmented = true;
6786
6787                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6788                                       frag_len, tmp_flag, mss, vlan);
6789                         *budget -= 1;
6790                         prvidx = *entry;
6791                         *entry = NEXT_TX(*entry);
6792
6793                         map += frag_len;
6794                 }
6795
6796                 if (len) {
6797                         if (*budget) {
6798                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6799                                               len, flags, mss, vlan);
6800                                 *budget -= 1;
6801                                 *entry = NEXT_TX(*entry);
6802                         } else {
6803                                 hwbug = true;
6804                                 tnapi->tx_buffers[prvidx].fragmented = false;
6805                         }
6806                 }
6807         } else {
6808                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6809                               len, flags, mss, vlan);
6810                 *entry = NEXT_TX(*entry);
6811         }
6812
6813         return hwbug;
6814 }
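
/* Worked example of the dma_limit splitting above, assuming
 * tp->dma_limit == 4096: an 8200-byte fragment would naively be emitted
 * as 4096 + 4096 + 8, but an 8-byte tail would trip the short-DMA
 * erratum, so the second chunk is halved to 2048 and the tail grows to
 * 2056, yielding 4096 + 2048 + 2056 instead.
 */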
6815
6816 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6817 {
6818         int i;
6819         struct sk_buff *skb;
6820         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6821
6822         skb = txb->skb;
6823         txb->skb = NULL;
6824
6825         pci_unmap_single(tnapi->tp->pdev,
6826                          dma_unmap_addr(txb, mapping),
6827                          skb_headlen(skb),
6828                          PCI_DMA_TODEVICE);
6829
6830         while (txb->fragmented) {
6831                 txb->fragmented = false;
6832                 entry = NEXT_TX(entry);
6833                 txb = &tnapi->tx_buffers[entry];
6834         }
6835
6836         for (i = 0; i <= last; i++) {
6837                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6838
6839                 entry = NEXT_TX(entry);
6840                 txb = &tnapi->tx_buffers[entry];
6841
6842                 pci_unmap_page(tnapi->tp->pdev,
6843                                dma_unmap_addr(txb, mapping),
6844                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6845
6846                 while (txb->fragmented) {
6847                         txb->fragmented = false;
6848                         entry = NEXT_TX(entry);
6849                         txb = &tnapi->tx_buffers[entry];
6850                 }
6851         }
6852 }
6853
6854 /* Work around 4GB and 40-bit hardware DMA bugs. */
6855 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6856                                        struct sk_buff **pskb,
6857                                        u32 *entry, u32 *budget,
6858                                        u32 base_flags, u32 mss, u32 vlan)
6859 {
6860         struct tg3 *tp = tnapi->tp;
6861         struct sk_buff *new_skb, *skb = *pskb;
6862         dma_addr_t new_addr = 0;
6863         int ret = 0;
6864
6865         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6866                 new_skb = skb_copy(skb, GFP_ATOMIC);
6867         else {
6868                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6869
6870                 new_skb = skb_copy_expand(skb,
6871                                           skb_headroom(skb) + more_headroom,
6872                                           skb_tailroom(skb), GFP_ATOMIC);
6873         }
6874
6875         if (!new_skb) {
6876                 ret = -1;
6877         } else {
6878                 /* New SKB is guaranteed to be linear. */
6879                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6880                                           PCI_DMA_TODEVICE);
6881                 /* Make sure the mapping succeeded */
6882                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6883                         dev_kfree_skb(new_skb);
6884                         ret = -1;
6885                 } else {
6886                         u32 save_entry = *entry;
6887
6888                         base_flags |= TXD_FLAG_END;
6889
6890                         tnapi->tx_buffers[*entry].skb = new_skb;
6891                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6892                                            mapping, new_addr);
6893
6894                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6895                                             new_skb->len, base_flags,
6896                                             mss, vlan)) {
6897                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6898                                 dev_kfree_skb(new_skb);
6899                                 ret = -1;
6900                         }
6901                 }
6902         }
6903
6904         dev_kfree_skb(skb);
6905         *pskb = new_skb;
6906         return ret;
6907 }
6908
6909 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6910
6911 /* Use GSO to work around a rare TSO bug that may be triggered when the
6912  * TSO header is greater than 80 bytes.
6913  */
6914 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6915 {
6916         struct sk_buff *segs, *nskb;
6917         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6918
6919         /* Estimate the number of fragments in the worst case */
6920         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6921                 netif_stop_queue(tp->dev);
6922
6923                 /* netif_tx_stop_queue() must be done before checking
6924                  * the tx index in tg3_tx_avail() below, because in
6925                  * tg3_tx(), we update tx index before checking for
6926                  * netif_tx_queue_stopped().
6927                  */
6928                 smp_mb();
6929                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6930                         return NETDEV_TX_BUSY;
6931
6932                 netif_wake_queue(tp->dev);
6933         }
6934
6935         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6936         if (IS_ERR_OR_NULL(segs))
6937                 goto tg3_tso_bug_end;
6938
6939         do {
6940                 nskb = segs;
6941                 segs = segs->next;
6942                 nskb->next = NULL;
6943                 tg3_start_xmit(nskb, tp->dev);
6944         } while (segs);
6945
6946 tg3_tso_bug_end:
6947         dev_kfree_skb(skb);
6948
6949         return NETDEV_TX_OK;
6950 }
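
/* Worked example of the reservation above: a 64KB TSO skb with an MSS
 * of 1448 segments into roughly 45 packets, so frag_cnt_est reserves
 * about 135 descriptors up front; each resulting linear segment then
 * typically consumes only one or two descriptors when re-submitted
 * through tg3_start_xmit().
 */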
6951
6952 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6953  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6954  */
6955 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6956 {
6957         struct tg3 *tp = netdev_priv(dev);
6958         u32 len, entry, base_flags, mss, vlan = 0;
6959         u32 budget;
6960         int i = -1, would_hit_hwbug;
6961         dma_addr_t mapping;
6962         struct tg3_napi *tnapi;
6963         struct netdev_queue *txq;
6964         unsigned int last;
6965
6966         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6967         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6968         if (tg3_flag(tp, ENABLE_TSS))
6969                 tnapi++;
6970
6971         budget = tg3_tx_avail(tnapi);
6972
6973         /* We are running in BH disabled context with netif_tx_lock
6974          * and TX reclaim runs via tp->napi.poll inside of a software
6975          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6976          * no IRQ context deadlocks to worry about either.  Rejoice!
6977          */
6978         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6979                 if (!netif_tx_queue_stopped(txq)) {
6980                         netif_tx_stop_queue(txq);
6981
6982                         /* This is a hard error, log it. */
6983                         netdev_err(dev,
6984                                    "BUG! Tx Ring full when queue awake!\n");
6985                 }
6986                 return NETDEV_TX_BUSY;
6987         }
6988
6989         entry = tnapi->tx_prod;
6990         base_flags = 0;
6991         if (skb->ip_summed == CHECKSUM_PARTIAL)
6992                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6993
6994         mss = skb_shinfo(skb)->gso_size;
6995         if (mss) {
6996                 struct iphdr *iph;
6997                 u32 tcp_opt_len, hdr_len;
6998
6999                 if (skb_header_cloned(skb) &&
7000                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7001                         goto drop;
7002
7003                 iph = ip_hdr(skb);
7004                 tcp_opt_len = tcp_optlen(skb);
7005
7006                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7007
7008                 if (!skb_is_gso_v6(skb)) {
7009                         iph->check = 0;
7010                         iph->tot_len = htons(mss + hdr_len);
7011                 }
7012
7013                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7014                     tg3_flag(tp, TSO_BUG))
7015                         return tg3_tso_bug(tp, skb);
7016
7017                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7018                                TXD_FLAG_CPU_POST_DMA);
7019
7020                 if (tg3_flag(tp, HW_TSO_1) ||
7021                     tg3_flag(tp, HW_TSO_2) ||
7022                     tg3_flag(tp, HW_TSO_3)) {
7023                         tcp_hdr(skb)->check = 0;
7024                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7025                 } else
7026                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7027                                                                  iph->daddr, 0,
7028                                                                  IPPROTO_TCP,
7029                                                                  0);
7030
7031                 if (tg3_flag(tp, HW_TSO_3)) {
7032                         mss |= (hdr_len & 0xc) << 12;
7033                         if (hdr_len & 0x10)
7034                                 base_flags |= 0x00000010;
7035                         base_flags |= (hdr_len & 0x3e0) << 5;
7036                 } else if (tg3_flag(tp, HW_TSO_2))
7037                         mss |= hdr_len << 9;
7038                 else if (tg3_flag(tp, HW_TSO_1) ||
7039                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7040                         if (tcp_opt_len || iph->ihl > 5) {
7041                                 int tsflags;
7042
7043                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7044                                 mss |= (tsflags << 11);
7045                         }
7046                 } else {
7047                         if (tcp_opt_len || iph->ihl > 5) {
7048                                 int tsflags;
7049
7050                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7051                                 base_flags |= tsflags << 12;
7052                         }
7053                 }
7054         }
7055
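        /* Non-TSO packets larger than a standard VLAN ethernet frame
         * must carry the jumbo BD flag on chips that use it.
         */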
7056         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7057             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7058                 base_flags |= TXD_FLAG_JMB_PKT;
7059
7060         if (vlan_tx_tag_present(skb)) {
7061                 base_flags |= TXD_FLAG_VLAN;
7062                 vlan = vlan_tx_tag_get(skb);
7063         }
7064
7065         len = skb_headlen(skb);
7066
7067         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7068         if (pci_dma_mapping_error(tp->pdev, mapping))
7069                 goto drop;
7070
7072         tnapi->tx_buffers[entry].skb = skb;
7073         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7074
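        /* Some chip revisions have DMA constraints a BD can violate
         * (e.g. the 5701 alignment bug); if any BD would, fall back to
         * the software workaround further below.
         */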
7075         would_hit_hwbug = 0;
7076
7077         if (tg3_flag(tp, 5701_DMA_BUG))
7078                 would_hit_hwbug = 1;
7079
7080         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7081                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7082                             mss, vlan)) {
7083                 would_hit_hwbug = 1;
7084         } else if (skb_shinfo(skb)->nr_frags > 0) {
7085                 u32 tmp_mss = mss;
7086
7087                 if (!tg3_flag(tp, HW_TSO_1) &&
7088                     !tg3_flag(tp, HW_TSO_2) &&
7089                     !tg3_flag(tp, HW_TSO_3))
7090                         tmp_mss = 0;
7091
7092                 /* Now loop through additional data
7093                  * fragments, and queue them.
7094                  */
7095                 last = skb_shinfo(skb)->nr_frags - 1;
7096                 for (i = 0; i <= last; i++) {
7097                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7098
7099                         len = skb_frag_size(frag);
7100                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7101                                                    len, DMA_TO_DEVICE);
7102
7103                         tnapi->tx_buffers[entry].skb = NULL;
7104                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7105                                            mapping);
7106                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7107                                 goto dma_error;
7108
7109                         if (!budget ||
7110                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7111                                             len, base_flags |
7112                                             ((i == last) ? TXD_FLAG_END : 0),
7113                                             tmp_mss, vlan)) {
7114                                 would_hit_hwbug = 1;
7115                                 break;
7116                         }
7117                 }
7118         }
7119
7120         if (would_hit_hwbug) {
7121                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7122
7123                 /* If the workaround fails due to memory/mapping
7124                  * failure, silently drop this packet.
7125                  */
7126                 entry = tnapi->tx_prod;
7127                 budget = tg3_tx_avail(tnapi);
7128                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7129                                                 base_flags, mss, vlan))
7130                         goto drop_nofree;
7131         }
7132
7133         skb_tx_timestamp(skb);
7134         netdev_tx_sent_queue(txq, skb->len);
7135
7136         /* Sync BD data before updating mailbox */
7137         wmb();
7138
7139         /* Packets are ready, update Tx producer idx local and on card. */
7140         tw32_tx_mbox(tnapi->prodmbox, entry);
7141
7142         tnapi->tx_prod = entry;
7143         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7144                 netif_tx_stop_queue(txq);
7145
7146                 /* netif_tx_stop_queue() must be done before checking
7147                  * the tx index in tg3_tx_avail() below, because in
7148                  * tg3_tx(), we update tx index before checking for
7149                  * netif_tx_queue_stopped().
7150                  */
7151                 smp_mb();
7152                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7153                         netif_tx_wake_queue(txq);
7154         }
7155
7156         mmiowb();
7157         return NETDEV_TX_OK;
7158
7159 dma_error:
7160         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7161         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7162 drop:
7163         dev_kfree_skb(skb);
7164 drop_nofree:
7165         tp->tx_dropped++;
7166         return NETDEV_TX_OK;
7167 }
7168
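/* Enable/disable internal MAC loopback; used by the NETIF_F_LOOPBACK
 * feature and the ethtool loopback self-test.
 */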
7169 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7170 {
7171         if (enable) {
7172                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7173                                   MAC_MODE_PORT_MODE_MASK);
7174
7175                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7176
7177                 if (!tg3_flag(tp, 5705_PLUS))
7178                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7179
7180                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7181                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7182                 else
7183                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7184         } else {
7185                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7186
7187                 if (tg3_flag(tp, 5705_PLUS) ||
7188                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7189                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7190                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7191         }
7192
7193         tw32(MAC_MODE, tp->mac_mode);
7194         udelay(40);
7195 }
7196
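/* Place the PHY in loopback at the given speed.  With extlpbk set,
 * external loopback is configured instead of BMCR-based internal
 * loopback.
 */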
7197 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7198 {
7199         u32 val, bmcr, mac_mode, ptest = 0;
7200
7201         tg3_phy_toggle_apd(tp, false);
7202         tg3_phy_toggle_automdix(tp, 0);
7203
7204         if (extlpbk && tg3_phy_set_extloopbk(tp))
7205                 return -EIO;
7206
7207         bmcr = BMCR_FULLDPLX;
7208         switch (speed) {
7209         case SPEED_10:
7210                 break;
7211         case SPEED_100:
7212                 bmcr |= BMCR_SPEED100;
7213                 break;
7214         case SPEED_1000:
7215         default:
7216                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7217                         speed = SPEED_100;
7218                         bmcr |= BMCR_SPEED100;
7219                 } else {
7220                         speed = SPEED_1000;
7221                         bmcr |= BMCR_SPEED1000;
7222                 }
7223         }
7224
7225         if (extlpbk) {
7226                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7227                         tg3_readphy(tp, MII_CTRL1000, &val);
7228                         val |= CTL1000_AS_MASTER |
7229                                CTL1000_ENABLE_MASTER;
7230                         tg3_writephy(tp, MII_CTRL1000, val);
7231                 } else {
7232                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7233                                 MII_TG3_FET_PTEST_TRIM_2;
7234                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7235                 }
7236         } else
7237                 bmcr |= BMCR_LOOPBACK;
7238
7239         tg3_writephy(tp, MII_BMCR, bmcr);
7240
7241         /* The write needs to be flushed for the FETs */
7242         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7243                 tg3_readphy(tp, MII_BMCR, &bmcr);
7244
7245         udelay(40);
7246
7247         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7248             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7249                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7250                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7251                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7252
7253                 /* The write needs to be flushed for the AC131 */
7254                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7255         }
7256
7257         /* Reset to prevent intermittently losing the 1st rx packet */
7258         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7259             tg3_flag(tp, 5780_CLASS)) {
7260                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7261                 udelay(10);
7262                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7263         }
7264
7265         mac_mode = tp->mac_mode &
7266                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7267         if (speed == SPEED_1000)
7268                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7269         else
7270                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7271
7272         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7273                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7274
7275                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7276                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7277                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7278                         mac_mode |= MAC_MODE_LINK_POLARITY;
7279
7280                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7281                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7282         }
7283
7284         tw32(MAC_MODE, mac_mode);
7285         udelay(40);
7286
7287         return 0;
7288 }
7289
7290 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7291 {
7292         struct tg3 *tp = netdev_priv(dev);
7293
7294         if (features & NETIF_F_LOOPBACK) {
7295                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7296                         return;
7297
7298                 spin_lock_bh(&tp->lock);
7299                 tg3_mac_loopback(tp, true);
7300                 netif_carrier_on(tp->dev);
7301                 spin_unlock_bh(&tp->lock);
7302                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7303         } else {
7304                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7305                         return;
7306
7307                 spin_lock_bh(&tp->lock);
7308                 tg3_mac_loopback(tp, false);
7309                 /* Force link status check */
7310                 tg3_setup_phy(tp, 1);
7311                 spin_unlock_bh(&tp->lock);
7312                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7313         }
7314 }
7315
7316 static netdev_features_t tg3_fix_features(struct net_device *dev,
7317         netdev_features_t features)
7318 {
7319         struct tg3 *tp = netdev_priv(dev);
7320
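        /* 5780-class chips cannot perform TSO on jumbo frames, so mask
         * out the TSO feature bits whenever a jumbo MTU is configured.
         */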
7321         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7322                 features &= ~NETIF_F_ALL_TSO;
7323
7324         return features;
7325 }
7326
7327 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7328 {
7329         netdev_features_t changed = dev->features ^ features;
7330
7331         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7332                 tg3_set_loopback(dev, features);
7333
7334         return 0;
7335 }
7336
7337 static void tg3_rx_prodring_free(struct tg3 *tp,
7338                                  struct tg3_rx_prodring_set *tpr)
7339 {
7340         int i;
7341
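        /* Per-vector rings only free the buffers still in flight
         * between the consumer and producer indices; the default ring
         * below is drained in full.
         */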
7342         if (tpr != &tp->napi[0].prodring) {
7343                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7344                      i = (i + 1) & tp->rx_std_ring_mask)
7345                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7346                                         tp->rx_pkt_map_sz);
7347
7348                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7349                         for (i = tpr->rx_jmb_cons_idx;
7350                              i != tpr->rx_jmb_prod_idx;
7351                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7352                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7353                                                 TG3_RX_JMB_MAP_SZ);
7354                         }
7355                 }
7356
7357                 return;
7358         }
7359
7360         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7361                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7362                                 tp->rx_pkt_map_sz);
7363
7364         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7365                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7366                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7367                                         TG3_RX_JMB_MAP_SZ);
7368         }
7369 }
7370
7371 /* Initialize rx rings for packet processing.
7372  *
7373  * The chip has been shut down and the driver detached from
7374  * the network stack, so no interrupts or new tx packets will
7375  * end up in the driver.  tp->{tx,}lock are held and thus
7376  * we may not sleep.
7377  */
7378 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7379                                  struct tg3_rx_prodring_set *tpr)
7380 {
7381         u32 i, rx_pkt_dma_sz;
7382
7383         tpr->rx_std_cons_idx = 0;
7384         tpr->rx_std_prod_idx = 0;
7385         tpr->rx_jmb_cons_idx = 0;
7386         tpr->rx_jmb_prod_idx = 0;
7387
7388         if (tpr != &tp->napi[0].prodring) {
7389                 memset(&tpr->rx_std_buffers[0], 0,
7390                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7391                 if (tpr->rx_jmb_buffers)
7392                         memset(&tpr->rx_jmb_buffers[0], 0,
7393                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7394                 goto done;
7395         }
7396
7397         /* Zero out all descriptors. */
7398         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7399
7400         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7401         if (tg3_flag(tp, 5780_CLASS) &&
7402             tp->dev->mtu > ETH_DATA_LEN)
7403                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7404         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7405
7406         /* Initialize invariants of the rings; we only set this
7407          * stuff once.  This works because the card does not
7408          * write into the rx buffer posting rings.
7409          */
7410         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7411                 struct tg3_rx_buffer_desc *rxd;
7412
7413                 rxd = &tpr->rx_std[i];
7414                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7415                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7416                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7417                                (i << RXD_OPAQUE_INDEX_SHIFT));
7418         }
7419
7420         /* Now allocate fresh SKBs for each rx ring. */
7421         for (i = 0; i < tp->rx_pending; i++) {
7422                 unsigned int frag_size;
7423
7424                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7425                                       &frag_size) < 0) {
7426                         netdev_warn(tp->dev,
7427                                     "Using a smaller RX standard ring. Only "
7428                                     "%d out of %d buffers were allocated "
7429                                     "successfully\n", i, tp->rx_pending);
7430                         if (i == 0)
7431                                 goto initfail;
7432                         tp->rx_pending = i;
7433                         break;
7434                 }
7435         }
7436
7437         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7438                 goto done;
7439
7440         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7441
7442         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7443                 goto done;
7444
7445         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7446                 struct tg3_rx_buffer_desc *rxd;
7447
7448                 rxd = &tpr->rx_jmb[i].std;
7449                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7450                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7451                                   RXD_FLAG_JUMBO;
7452                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7453                        (i << RXD_OPAQUE_INDEX_SHIFT));
7454         }
7455
7456         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7457                 unsigned int frag_size;
7458
7459                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7460                                       &frag_size) < 0) {
7461                         netdev_warn(tp->dev,
7462                                     "Using a smaller RX jumbo ring. Only %d "
7463                                     "out of %d buffers were allocated "
7464                                     "successfully\n", i, tp->rx_jumbo_pending);
7465                         if (i == 0)
7466                                 goto initfail;
7467                         tp->rx_jumbo_pending = i;
7468                         break;
7469                 }
7470         }
7471
7472 done:
7473         return 0;
7474
7475 initfail:
7476         tg3_rx_prodring_free(tp, tpr);
7477         return -ENOMEM;
7478 }
7479
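/* Release the buffer bookkeeping arrays and the coherent descriptor
 * rings of one producer ring set.
 */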
7480 static void tg3_rx_prodring_fini(struct tg3 *tp,
7481                                  struct tg3_rx_prodring_set *tpr)
7482 {
7483         kfree(tpr->rx_std_buffers);
7484         tpr->rx_std_buffers = NULL;
7485         kfree(tpr->rx_jmb_buffers);
7486         tpr->rx_jmb_buffers = NULL;
7487         if (tpr->rx_std) {
7488                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7489                                   tpr->rx_std, tpr->rx_std_mapping);
7490                 tpr->rx_std = NULL;
7491         }
7492         if (tpr->rx_jmb) {
7493                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7494                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7495                 tpr->rx_jmb = NULL;
7496         }
7497 }
7498
7499 static int tg3_rx_prodring_init(struct tg3 *tp,
7500                                 struct tg3_rx_prodring_set *tpr)
7501 {
7502         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7503                                       GFP_KERNEL);
7504         if (!tpr->rx_std_buffers)
7505                 return -ENOMEM;
7506
7507         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7508                                          TG3_RX_STD_RING_BYTES(tp),
7509                                          &tpr->rx_std_mapping,
7510                                          GFP_KERNEL);
7511         if (!tpr->rx_std)
7512                 goto err_out;
7513
7514         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7515                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7516                                               GFP_KERNEL);
7517                 if (!tpr->rx_jmb_buffers)
7518                         goto err_out;
7519
7520                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7521                                                  TG3_RX_JMB_RING_BYTES(tp),
7522                                                  &tpr->rx_jmb_mapping,
7523                                                  GFP_KERNEL);
7524                 if (!tpr->rx_jmb)
7525                         goto err_out;
7526         }
7527
7528         return 0;
7529
7530 err_out:
7531         tg3_rx_prodring_fini(tp, tpr);
7532         return -ENOMEM;
7533 }
7534
7535 /* Free up pending packets in all rx/tx rings.
7536  *
7537  * The chip has been shut down and the driver detached from
7538  * the network stack, so no interrupts or new tx packets will
7539  * end up in the driver.  tp->{tx,}lock is not held and we are not
7540  * in an interrupt context and thus may sleep.
7541  */
7542 static void tg3_free_rings(struct tg3 *tp)
7543 {
7544         int i, j;
7545
7546         for (j = 0; j < tp->irq_cnt; j++) {
7547                 struct tg3_napi *tnapi = &tp->napi[j];
7548
7549                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7550
7551                 if (!tnapi->tx_buffers)
7552                         continue;
7553
7554                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7555                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7556
7557                         if (!skb)
7558                                 continue;
7559
7560                         tg3_tx_skb_unmap(tnapi, i,
7561                                          skb_shinfo(skb)->nr_frags - 1);
7562
7563                         dev_kfree_skb_any(skb);
7564                 }
7565                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7566         }
7567 }
7568
7569 /* Initialize tx/rx rings for packet processing.
7570  *
7571  * The chip has been shut down and the driver detached from
7572  * the network stack, so no interrupts or new tx packets will
7573  * end up in the driver.  tp->{tx,}lock are held and thus
7574  * we may not sleep.
7575  */
7576 static int tg3_init_rings(struct tg3 *tp)
7577 {
7578         int i;
7579
7580         /* Free up all the SKBs. */
7581         tg3_free_rings(tp);
7582
7583         for (i = 0; i < tp->irq_cnt; i++) {
7584                 struct tg3_napi *tnapi = &tp->napi[i];
7585
7586                 tnapi->last_tag = 0;
7587                 tnapi->last_irq_tag = 0;
7588                 tnapi->hw_status->status = 0;
7589                 tnapi->hw_status->status_tag = 0;
7590                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7591
7592                 tnapi->tx_prod = 0;
7593                 tnapi->tx_cons = 0;
7594                 if (tnapi->tx_ring)
7595                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7596
7597                 tnapi->rx_rcb_ptr = 0;
7598                 if (tnapi->rx_rcb)
7599                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7600
7601                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7602                         tg3_free_rings(tp);
7603                         return -ENOMEM;
7604                 }
7605         }
7606
7607         return 0;
7608 }
7609
7610 /*
7611  * Must only be invoked with interrupt sources disabled and
7612  * the hardware shut down.
7613  */
7614 static void tg3_free_consistent(struct tg3 *tp)
7615 {
7616         int i;
7617
7618         for (i = 0; i < tp->irq_cnt; i++) {
7619                 struct tg3_napi *tnapi = &tp->napi[i];
7620
7621                 if (tnapi->tx_ring) {
7622                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7623                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7624                         tnapi->tx_ring = NULL;
7625                 }
7626
7627                 kfree(tnapi->tx_buffers);
7628                 tnapi->tx_buffers = NULL;
7629
7630                 if (tnapi->rx_rcb) {
7631                         dma_free_coherent(&tp->pdev->dev,
7632                                           TG3_RX_RCB_RING_BYTES(tp),
7633                                           tnapi->rx_rcb,
7634                                           tnapi->rx_rcb_mapping);
7635                         tnapi->rx_rcb = NULL;
7636                 }
7637
7638                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7639
7640                 if (tnapi->hw_status) {
7641                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7642                                           tnapi->hw_status,
7643                                           tnapi->status_mapping);
7644                         tnapi->hw_status = NULL;
7645                 }
7646         }
7647
7648         if (tp->hw_stats) {
7649                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7650                                   tp->hw_stats, tp->stats_mapping);
7651                 tp->hw_stats = NULL;
7652         }
7653 }
7654
7655 /*
7656  * Must only be invoked with interrupt sources disabled and
7657  * the hardware shut down.  Can sleep.
7658  */
7659 static int tg3_alloc_consistent(struct tg3 *tp)
7660 {
7661         int i;
7662
7663         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7664                                           sizeof(struct tg3_hw_stats),
7665                                           &tp->stats_mapping,
7666                                           GFP_KERNEL);
7667         if (!tp->hw_stats)
7668                 goto err_out;
7669
7670         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7671
7672         for (i = 0; i < tp->irq_cnt; i++) {
7673                 struct tg3_napi *tnapi = &tp->napi[i];
7674                 struct tg3_hw_status *sblk;
7675
7676                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7677                                                       TG3_HW_STATUS_SIZE,
7678                                                       &tnapi->status_mapping,
7679                                                       GFP_KERNEL);
7680                 if (!tnapi->hw_status)
7681                         goto err_out;
7682
7683                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7684                 sblk = tnapi->hw_status;
7685
7686                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7687                         goto err_out;
7688
7689                 /* If multivector TSS is enabled, vector 0 does not handle
7690                  * tx interrupts.  Don't allocate any resources for it.
7691                  */
7692                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7693                     (i && tg3_flag(tp, ENABLE_TSS))) {
7694                         tnapi->tx_buffers = kzalloc(
7695                                                sizeof(struct tg3_tx_ring_info) *
7696                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7697                         if (!tnapi->tx_buffers)
7698                                 goto err_out;
7699
7700                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7701                                                             TG3_TX_RING_BYTES,
7702                                                         &tnapi->tx_desc_mapping,
7703                                                             GFP_KERNEL);
7704                         if (!tnapi->tx_ring)
7705                                 goto err_out;
7706                 }
7707
7708                 /*
7709                  * When RSS is enabled, the status block format changes
7710                  * slightly.  The "rx_jumbo_consumer", "reserved",
7711                  * and "rx_mini_consumer" members get mapped to the
7712                  * other three rx return ring producer indexes.
7713                  */
7714                 switch (i) {
7715                 default:
7716                         if (tg3_flag(tp, ENABLE_RSS)) {
7717                                 tnapi->rx_rcb_prod_idx = NULL;
7718                                 break;
7719                         }
7720                         /* Fall through */
7721                 case 1:
7722                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7723                         break;
7724                 case 2:
7725                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7726                         break;
7727                 case 3:
7728                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7729                         break;
7730                 case 4:
7731                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7732                         break;
7733                 }
7734
7735                 /*
7736                  * If multivector RSS is enabled, vector 0 does not handle
7737                  * rx or tx interrupts.  Don't allocate any resources for it.
7738                  */
7739                 if (!i && tg3_flag(tp, ENABLE_RSS))
7740                         continue;
7741
7742                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7743                                                    TG3_RX_RCB_RING_BYTES(tp),
7744                                                    &tnapi->rx_rcb_mapping,
7745                                                    GFP_KERNEL);
7746                 if (!tnapi->rx_rcb)
7747                         goto err_out;
7748
7749                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7750         }
7751
7752         return 0;
7753
7754 err_out:
7755         tg3_free_consistent(tp);
7756         return -ENOMEM;
7757 }
7758
7759 #define MAX_WAIT_CNT 1000
7760
7761 /* To stop a block, clear the enable bit and poll till it
7762  * clears.  tp->lock is held.
7763  */
7764 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7765 {
7766         unsigned int i;
7767         u32 val;
7768
7769         if (tg3_flag(tp, 5705_PLUS)) {
7770                 switch (ofs) {
7771                 case RCVLSC_MODE:
7772                 case DMAC_MODE:
7773                 case MBFREE_MODE:
7774                 case BUFMGR_MODE:
7775                 case MEMARB_MODE:
7776                         /* We can't enable/disable these bits of the
7777                          * 5705/5750, just say success.
7778                          */
7779                         return 0;
7780
7781                 default:
7782                         break;
7783                 }
7784         }
7785
7786         val = tr32(ofs);
7787         val &= ~enable_bit;
7788         tw32_f(ofs, val);
7789
7790         for (i = 0; i < MAX_WAIT_CNT; i++) {
7791                 udelay(100);
7792                 val = tr32(ofs);
7793                 if ((val & enable_bit) == 0)
7794                         break;
7795         }
7796
7797         if (i == MAX_WAIT_CNT && !silent) {
7798                 dev_err(&tp->pdev->dev,
7799                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7800                         ofs, enable_bit);
7801                 return -ENODEV;
7802         }
7803
7804         return 0;
7805 }
7806
7807 /* tp->lock is held. */
7808 static int tg3_abort_hw(struct tg3 *tp, int silent)
7809 {
7810         int i, err;
7811
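        /* Quiesce the chip: stop the receive path first, then shut
         * down each send/receive/DMA block, and finally the host
         * coalescing, buffer manager and memory arbiter blocks.
         */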
7812         tg3_disable_ints(tp);
7813
7814         tp->rx_mode &= ~RX_MODE_ENABLE;
7815         tw32_f(MAC_RX_MODE, tp->rx_mode);
7816         udelay(10);
7817
7818         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7819         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7820         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7821         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7822         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7823         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7824
7825         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7826         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7827         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7828         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7829         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7830         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7831         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7832
7833         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7834         tw32_f(MAC_MODE, tp->mac_mode);
7835         udelay(40);
7836
7837         tp->tx_mode &= ~TX_MODE_ENABLE;
7838         tw32_f(MAC_TX_MODE, tp->tx_mode);
7839
7840         for (i = 0; i < MAX_WAIT_CNT; i++) {
7841                 udelay(100);
7842                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7843                         break;
7844         }
7845         if (i >= MAX_WAIT_CNT) {
7846                 dev_err(&tp->pdev->dev,
7847                         "%s timed out, TX_MODE_ENABLE will not clear "
7848                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7849                 err |= -ENODEV;
7850         }
7851
7852         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7853         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7854         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7855
7856         tw32(FTQ_RESET, 0xffffffff);
7857         tw32(FTQ_RESET, 0x00000000);
7858
7859         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7860         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7861
7862         for (i = 0; i < tp->irq_cnt; i++) {
7863                 struct tg3_napi *tnapi = &tp->napi[i];
7864                 if (tnapi->hw_status)
7865                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7866         }
7867
7868         return err;
7869 }
7870
7871 /* Save PCI command register before chip reset */
7872 static void tg3_save_pci_state(struct tg3 *tp)
7873 {
7874         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7875 }
7876
7877 /* Restore PCI state after chip reset */
7878 static void tg3_restore_pci_state(struct tg3 *tp)
7879 {
7880         u32 val;
7881
7882         /* Re-enable indirect register accesses. */
7883         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7884                                tp->misc_host_ctrl);
7885
7886         /* Set MAX PCI retry to zero. */
7887         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7888         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7889             tg3_flag(tp, PCIX_MODE))
7890                 val |= PCISTATE_RETRY_SAME_DMA;
7891         /* Allow reads and writes to the APE register and memory space. */
7892         if (tg3_flag(tp, ENABLE_APE))
7893                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7894                        PCISTATE_ALLOW_APE_SHMEM_WR |
7895                        PCISTATE_ALLOW_APE_PSPACE_WR;
7896         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7897
7898         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7899
7900         if (!tg3_flag(tp, PCI_EXPRESS)) {
7901                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7902                                       tp->pci_cacheline_sz);
7903                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7904                                       tp->pci_lat_timer);
7905         }
7906
7907         /* Make sure PCI-X relaxed ordering bit is clear. */
7908         if (tg3_flag(tp, PCIX_MODE)) {
7909                 u16 pcix_cmd;
7910
7911                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7912                                      &pcix_cmd);
7913                 pcix_cmd &= ~PCI_X_CMD_ERO;
7914                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7915                                       pcix_cmd);
7916         }
7917
7918         if (tg3_flag(tp, 5780_CLASS)) {
7920                 /* Chip reset on 5780 will reset the MSI enable bit,
7921                  * so we need to restore it.
7922                  */
7923                 if (tg3_flag(tp, USING_MSI)) {
7924                         u16 ctrl;
7925
7926                         pci_read_config_word(tp->pdev,
7927                                              tp->msi_cap + PCI_MSI_FLAGS,
7928                                              &ctrl);
7929                         pci_write_config_word(tp->pdev,
7930                                               tp->msi_cap + PCI_MSI_FLAGS,
7931                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7932                         val = tr32(MSGINT_MODE);
7933                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7934                 }
7935         }
7936 }
7937
7938 /* tp->lock is held. */
7939 static int tg3_chip_reset(struct tg3 *tp)
7940 {
7941         u32 val;
7942         void (*write_op)(struct tg3 *, u32, u32);
7943         int i, err;
7944
7945         tg3_nvram_lock(tp);
7946
7947         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7948
7949         /* No matching tg3_nvram_unlock() after this because
7950          * the chip reset below will undo the nvram lock.
7951          */
7952         tp->nvram_lock_cnt = 0;
7953
7954         /* GRC_MISC_CFG core clock reset will clear the memory
7955          * enable bit in PCI register 4 and the MSI enable bit
7956          * on some chips, so we save relevant registers here.
7957          */
7958         tg3_save_pci_state(tp);
7959
7960         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7961             tg3_flag(tp, 5755_PLUS))
7962                 tw32(GRC_FASTBOOT_PC, 0);
7963
7964         /*
7965          * We must avoid the readl() that normally takes place.
7966          * It can lock up machines, cause machine checks, and other
7967          * fun things.  So, temporarily disable the 5701
7968          * hardware workaround while we do the reset.
7969          */
7970         write_op = tp->write32;
7971         if (write_op == tg3_write_flush_reg32)
7972                 tp->write32 = tg3_write32;
7973
7974         /* Prevent the irq handler from reading or writing PCI registers
7975          * during chip reset when the memory enable bit in the PCI command
7976          * register may be cleared.  The chip does not generate interrupts
7977          * at this time, but the irq handler may still be called due to irq
7978          * sharing or irqpoll.
7979          */
7980         tg3_flag_set(tp, CHIP_RESETTING);
7981         for (i = 0; i < tp->irq_cnt; i++) {
7982                 struct tg3_napi *tnapi = &tp->napi[i];
7983                 if (tnapi->hw_status) {
7984                         tnapi->hw_status->status = 0;
7985                         tnapi->hw_status->status_tag = 0;
7986                 }
7987                 tnapi->last_tag = 0;
7988                 tnapi->last_irq_tag = 0;
7989         }
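        /* Make the CHIP_RESETTING flag and the tag resets above visible
         * to the irq handlers before synchronizing with them.
         */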
7990         smp_mb();
7991
7992         for (i = 0; i < tp->irq_cnt; i++)
7993                 synchronize_irq(tp->napi[i].irq_vec);
7994
7995         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7996                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7997                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7998         }
7999
8000         /* do the reset */
8001         val = GRC_MISC_CFG_CORECLK_RESET;
8002
8003         if (tg3_flag(tp, PCI_EXPRESS)) {
8004                 /* Force PCIe 1.0a mode */
8005                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8006                     !tg3_flag(tp, 57765_PLUS) &&
8007                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8008                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8009                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8010
8011                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8012                         tw32(GRC_MISC_CFG, (1 << 29));
8013                         val |= (1 << 29);
8014                 }
8015         }
8016
8017         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8018                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8019                 tw32(GRC_VCPU_EXT_CTRL,
8020                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8021         }
8022
8023         /* Manage gphy power for all CPMU-absent PCIe devices. */
8024         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8025                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8026
8027         tw32(GRC_MISC_CFG, val);
8028
8029         /* restore 5701 hardware bug workaround write method */
8030         tp->write32 = write_op;
8031
8032         /* Unfortunately, we have to delay before the PCI read back.
8033          * Some 575X chips will not even respond to a PCI cfg access
8034          * when the reset command is given to the chip.
8035          *
8036          * How do these hardware designers expect things to work
8037          * properly if the PCI write is posted for a long period
8038          * of time?  It is always necessary to have some method by
8039          * which a register read back can occur to push the write
8040          * out which does the reset.
8041          *
8042          * For most tg3 variants the trick below was working.
8043          * Ho hum...
8044          */
8045         udelay(120);
8046
8047         /* Flush PCI posted writes.  The normal MMIO registers
8048          * are inaccessible at this time so this is the only
8049          * way to do this reliably (actually, this is no longer
8050          * the case, see above).  I tried to use indirect
8051          * register read/write but this upset some 5701 variants.
8052          */
8053         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8054
8055         udelay(120);
8056
8057         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
8058                 u16 val16;
8059
8060                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8061                         int i;
8062                         u32 cfg_val;
8063
8064                         /* Wait for link training to complete.  */
8065                         for (i = 0; i < 5000; i++)
8066                                 udelay(100);
8067
8068                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8069                         pci_write_config_dword(tp->pdev, 0xc4,
8070                                                cfg_val | (1 << 15));
8071                 }
8072
8073                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8074                 pci_read_config_word(tp->pdev,
8075                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8076                                      &val16);
8077                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
8078                            PCI_EXP_DEVCTL_NOSNOOP_EN);
8079                 /*
8080                  * Older PCIe devices only support the 128 byte
8081                  * MPS setting.  Enforce the restriction.
8082                  */
8083                 if (!tg3_flag(tp, CPMU_PRESENT))
8084                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
8085                 pci_write_config_word(tp->pdev,
8086                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8087                                       val16);
8088
8089                 /* Clear error status */
8090                 pci_write_config_word(tp->pdev,
8091                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
8092                                       PCI_EXP_DEVSTA_CED |
8093                                       PCI_EXP_DEVSTA_NFED |
8094                                       PCI_EXP_DEVSTA_FED |
8095                                       PCI_EXP_DEVSTA_URD);
8096         }
8097
8098         tg3_restore_pci_state(tp);
8099
8100         tg3_flag_clear(tp, CHIP_RESETTING);
8101         tg3_flag_clear(tp, ERROR_PROCESSED);
8102
8103         val = 0;
8104         if (tg3_flag(tp, 5780_CLASS))
8105                 val = tr32(MEMARB_MODE);
8106         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8107
8108         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8109                 tg3_stop_fw(tp);
8110                 tw32(0x5000, 0x400);
8111         }
8112
8113         tw32(GRC_MODE, tp->grc_mode);
8114
8115         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8116                 val = tr32(0xc4);
8117
8118                 tw32(0xc4, val | (1 << 15));
8119         }
8120
8121         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8122             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8123                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8124                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8125                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8126                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8127         }
8128
8129         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8130                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8131                 val = tp->mac_mode;
8132         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8133                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8134                 val = tp->mac_mode;
8135         } else
8136                 val = 0;
8137
8138         tw32_f(MAC_MODE, val);
8139         udelay(40);
8140
8141         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8142
8143         err = tg3_poll_fw(tp);
8144         if (err)
8145                 return err;
8146
8147         tg3_mdio_start(tp);
8148
8149         if (tg3_flag(tp, PCI_EXPRESS) &&
8150             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8151             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8152             !tg3_flag(tp, 57765_PLUS)) {
8153                 val = tr32(0x7c00);
8154
8155                 tw32(0x7c00, val | (1 << 25));
8156         }
8157
8158         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8159                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8160                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8161         }
8162
8163         /* Reprobe ASF enable state.  */
8164         tg3_flag_clear(tp, ENABLE_ASF);
8165         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8166         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8167         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8168                 u32 nic_cfg;
8169
8170                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8171                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8172                         tg3_flag_set(tp, ENABLE_ASF);
8173                         tp->last_event_jiffies = jiffies;
8174                         if (tg3_flag(tp, 5750_PLUS))
8175                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8176                 }
8177         }
8178
8179         return 0;
8180 }
8181
8182 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8183 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8184
8185 /* tp->lock is held. */
8186 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8187 {
8188         int err;
8189
8190         tg3_stop_fw(tp);
8191
8192         tg3_write_sig_pre_reset(tp, kind);
8193
8194         tg3_abort_hw(tp, silent);
8195         err = tg3_chip_reset(tp);
8196
8197         __tg3_set_mac_addr(tp, 0);
8198
8199         tg3_write_sig_legacy(tp, kind);
8200         tg3_write_sig_post_reset(tp, kind);
8201
8202         if (tp->hw_stats) {
8203                 /* Save the stats across chip resets... */
8204                 tg3_get_nstats(tp, &tp->net_stats_prev);
8205                 tg3_get_estats(tp, &tp->estats_prev);
8206
8207                 /* And make sure the next sample is new data */
8208                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8209         }
8210
8211         if (err)
8212                 return err;
8213
8214         return 0;
8215 }
8216
8217 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8218 {
8219         struct tg3 *tp = netdev_priv(dev);
8220         struct sockaddr *addr = p;
8221         int err = 0, skip_mac_1 = 0;
8222
8223         if (!is_valid_ether_addr(addr->sa_data))
8224                 return -EADDRNOTAVAIL;
8225
8226         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8227
8228         if (!netif_running(dev))
8229                 return 0;
8230
8231         if (tg3_flag(tp, ENABLE_ASF)) {
8232                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8233
8234                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8235                 addr0_low = tr32(MAC_ADDR_0_LOW);
8236                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8237                 addr1_low = tr32(MAC_ADDR_1_LOW);
8238
8239                 /* Skip MAC addr 1 if ASF is using it. */
8240                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8241                     !(addr1_high == 0 && addr1_low == 0))
8242                         skip_mac_1 = 1;
8243         }
8244         spin_lock_bh(&tp->lock);
8245         __tg3_set_mac_addr(tp, skip_mac_1);
8246         spin_unlock_bh(&tp->lock);
8247
8248         return err;
8249 }
8250
8251 /* tp->lock is held. */
8252 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8253                            dma_addr_t mapping, u32 maxlen_flags,
8254                            u32 nic_addr)
8255 {
8256         tg3_write_mem(tp,
8257                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8258                       ((u64) mapping >> 32));
8259         tg3_write_mem(tp,
8260                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8261                       ((u64) mapping & 0xffffffff));
8262         tg3_write_mem(tp,
8263                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8264                        maxlen_flags);
8265
8266         if (!tg3_flag(tp, 5705_PLUS))
8267                 tg3_write_mem(tp,
8268                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8269                               nic_addr);
8270 }
8271
8272 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8273 {
8274         int i;
8275
8276         if (!tg3_flag(tp, ENABLE_TSS)) {
8277                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8278                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8279                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8280         } else {
8281                 tw32(HOSTCC_TXCOL_TICKS, 0);
8282                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8283                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8284         }
8285
8286         if (!tg3_flag(tp, ENABLE_RSS)) {
8287                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8288                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8289                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8290         } else {
8291                 tw32(HOSTCC_RXCOL_TICKS, 0);
8292                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8293                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8294         }
8295
8296         if (!tg3_flag(tp, 5705_PLUS)) {
8297                 u32 val = ec->stats_block_coalesce_usecs;
8298
8299                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8300                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8301
8302                 if (!netif_carrier_ok(tp->dev))
8303                         val = 0;
8304
8305                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8306         }
8307
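        /* Per-vector coalescing registers are laid out at 0x18-byte
         * strides starting from the VEC1 offsets.
         */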
8308         for (i = 0; i < tp->irq_cnt - 1; i++) {
8309                 u32 reg;
8310
8311                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8312                 tw32(reg, ec->rx_coalesce_usecs);
8313                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8314                 tw32(reg, ec->rx_max_coalesced_frames);
8315                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8316                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8317
8318                 if (tg3_flag(tp, ENABLE_TSS)) {
8319                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8320                         tw32(reg, ec->tx_coalesce_usecs);
8321                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8322                         tw32(reg, ec->tx_max_coalesced_frames);
8323                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8324                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8325                 }
8326         }
8327
8328         for (; i < tp->irq_max - 1; i++) {
8329                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8330                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8331                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8332
8333                 if (tg3_flag(tp, ENABLE_TSS)) {
8334                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8335                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8336                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8337                 }
8338         }
8339 }
8340
8341 /* tp->lock is held. */
8342 static void tg3_rings_reset(struct tg3 *tp)
8343 {
8344         int i;
8345         u32 stblk, txrcb, rxrcb, limit;
8346         struct tg3_napi *tnapi = &tp->napi[0];
8347
8348         /* Disable all transmit rings but the first. */
8349         if (!tg3_flag(tp, 5705_PLUS))
8350                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8351         else if (tg3_flag(tp, 5717_PLUS))
8352                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8353         else if (tg3_flag(tp, 57765_CLASS))
8354                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8355         else
8356                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8357
8358         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8359              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8360                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8361                               BDINFO_FLAGS_DISABLED);
8362
8364         /* Disable all receive return rings but the first. */
8365         if (tg3_flag(tp, 5717_PLUS))
8366                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8367         else if (!tg3_flag(tp, 5705_PLUS))
8368                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8369         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8370                  tg3_flag(tp, 57765_CLASS))
8371                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8372         else
8373                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8374
8375         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8376              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8377                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8378                               BDINFO_FLAGS_DISABLED);
8379
8380         /* Disable interrupts */
8381         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8382         tp->napi[0].chk_msi_cnt = 0;
8383         tp->napi[0].last_rx_cons = 0;
8384         tp->napi[0].last_tx_cons = 0;
8385
8386         /* Zero mailbox registers. */
8387         if (tg3_flag(tp, SUPPORT_MSIX)) {
8388                 for (i = 1; i < tp->irq_max; i++) {
8389                         tp->napi[i].tx_prod = 0;
8390                         tp->napi[i].tx_cons = 0;
8391                         if (tg3_flag(tp, ENABLE_TSS))
8392                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8393                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8394                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8395                         tp->napi[i].chk_msi_cnt = 0;
8396                         tp->napi[i].last_rx_cons = 0;
8397                         tp->napi[i].last_tx_cons = 0;
8398                 }
8399                 if (!tg3_flag(tp, ENABLE_TSS))
8400                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8401         } else {
8402                 tp->napi[0].tx_prod = 0;
8403                 tp->napi[0].tx_cons = 0;
8404                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8405                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8406         }
8407
8408         /* Make sure the NIC-based send BD rings are disabled. */
8409         if (!tg3_flag(tp, 5705_PLUS)) {
8410                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8411                 for (i = 0; i < 16; i++)
8412                         tw32_tx_mbox(mbox + i * 8, 0);
8413         }
8414
8415         txrcb = NIC_SRAM_SEND_RCB;
8416         rxrcb = NIC_SRAM_RCV_RET_RCB;
8417
8418         /* Clear the status block in RAM. */
8419         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8420
8421         /* Set status block DMA address */
8422         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8423              ((u64) tnapi->status_mapping >> 32));
8424         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8425              ((u64) tnapi->status_mapping & 0xffffffff));
8426
8427         if (tnapi->tx_ring) {
8428                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8429                                (TG3_TX_RING_SIZE <<
8430                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8431                                NIC_SRAM_TX_BUFFER_DESC);
8432                 txrcb += TG3_BDINFO_SIZE;
8433         }
8434
8435         if (tnapi->rx_rcb) {
8436                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8437                                (tp->rx_ret_ring_mask + 1) <<
8438                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8439                 rxrcb += TG3_BDINFO_SIZE;
8440         }
8441
8442         stblk = HOSTCC_STATBLCK_RING1;
8443
8444         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8445                 u64 mapping = (u64)tnapi->status_mapping;
8446                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8447                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8448
8449                 /* Clear status block in ram. */
8450                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8451
8452                 if (tnapi->tx_ring) {
8453                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8454                                        (TG3_TX_RING_SIZE <<
8455                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8456                                        NIC_SRAM_TX_BUFFER_DESC);
8457                         txrcb += TG3_BDINFO_SIZE;
8458                 }
8459
8460                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8461                                ((tp->rx_ret_ring_mask + 1) <<
8462                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8463
8464                 stblk += 8;
8465                 rxrcb += TG3_BDINFO_SIZE;
8466         }
8467 }
8468
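/* Pick replenish thresholds for the rx BD initiator (an editorial
 * summary: the NIC tops up its on-chip BD cache once it drains below
 * RCVBDI_STD_THRESH).  The programmed value is the smaller of half the
 * per-chip cache size (itself capped by rx_std_max_post) and one
 * eighth of the host ring, e.g. rx_pending == 200 gives a host-side
 * threshold of 200 / 8 = 25.
 */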
8469 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8470 {
8471         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8472
8473         if (!tg3_flag(tp, 5750_PLUS) ||
8474             tg3_flag(tp, 5780_CLASS) ||
8475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8476             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8477             tg3_flag(tp, 57765_PLUS))
8478                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8479         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8480                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8481                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8482         else
8483                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8484
8485         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8486         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8487
8488         val = min(nic_rep_thresh, host_rep_thresh);
8489         tw32(RCVBDI_STD_THRESH, val);
8490
8491         if (tg3_flag(tp, 57765_PLUS))
8492                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8493
8494         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8495                 return;
8496
8497         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8498
8499         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8500
8501         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8502         tw32(RCVBDI_JUMBO_THRESH, val);
8503
8504         if (tg3_flag(tp, 57765_PLUS))
8505                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8506 }
8507
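/* Standard little-endian (reflected) CRC-32 as used for the Ethernet
 * FCS: each input byte is shifted through the bit-reversed polynomial
 * 0xedb88320 (0x04c11db7 reversed) and the accumulator is inverted on
 * the way out.  As a sketch -- an equivalence the editor believes
 * holds, not something this driver relies on -- the kernel CRC
 * library should compute the same value:
 *
 *	#include <linux/crc32.h>
 *	u32 crc = ~ether_crc_le(len, buf);	(== calc_crc(buf, len))
 */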
8508 static inline u32 calc_crc(unsigned char *buf, int len)
8509 {
8510         u32 reg;
8511         u32 tmp;
8512         int j, k;
8513
8514         reg = 0xffffffff;
8515
8516         for (j = 0; j < len; j++) {
8517                 reg ^= buf[j];
8518
8519                 for (k = 0; k < 8; k++) {
8520                         tmp = reg & 0x01;
8521
8522                         reg >>= 1;
8523
8524                         if (tmp)
8525                                 reg ^= 0xedb88320;
8526                 }
8527         }
8528
8529         return ~reg;
8530 }
8531
8532 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8533 {
8534         /* accept or reject all multicast frames */
8535         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8536         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8537         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8538         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8539 }
8540
8541 static void __tg3_set_rx_mode(struct net_device *dev)
8542 {
8543         struct tg3 *tp = netdev_priv(dev);
8544         u32 rx_mode;
8545
8546         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8547                                   RX_MODE_KEEP_VLAN_TAG);
8548
8549 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8550         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8551          * flag clear.
8552          */
8553         if (!tg3_flag(tp, ENABLE_ASF))
8554                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8555 #endif
8556
8557         if (dev->flags & IFF_PROMISC) {
8558                 /* Promiscuous mode. */
8559                 rx_mode |= RX_MODE_PROMISC;
8560         } else if (dev->flags & IFF_ALLMULTI) {
8561                 /* Accept all multicast. */
8562                 tg3_set_multi(tp, 1);
8563         } else if (netdev_mc_empty(dev)) {
8564                 /* Reject all multicast. */
8565                 tg3_set_multi(tp, 0);
8566         } else {
8567                 /* Accept one or more multicast(s). */
8568                 struct netdev_hw_addr *ha;
8569                 u32 mc_filter[4] = { 0, };
8570                 u32 regidx;
8571                 u32 bit;
8572                 u32 crc;
8573
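                /* Each address hashes to one of 128 buckets: the low
                 * seven bits of the inverted CRC pick the bucket, bits
                 * 6:5 select one of the four 32-bit hash registers and
                 * bits 4:0 the bit within it.  Worked example: a CRC
                 * whose low seven bits are 0x35 inverts to 0x4a, so
                 * regidx = (0x4a & 0x60) >> 5 = 2 and bit = 0x4a & 0x1f
                 * = 10, i.e. bit 10 of MAC_HASH_REG_2 gets set.
                 */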
8574                 netdev_for_each_mc_addr(ha, dev) {
8575                         crc = calc_crc(ha->addr, ETH_ALEN);
8576                         bit = ~crc & 0x7f;
8577                         regidx = (bit & 0x60) >> 5;
8578                         bit &= 0x1f;
8579                         mc_filter[regidx] |= (1 << bit);
8580                 }
8581
8582                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8583                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8584                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8585                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8586         }
8587
8588         if (rx_mode != tp->rx_mode) {
8589                 tp->rx_mode = rx_mode;
8590                 tw32_f(MAC_RX_MODE, rx_mode);
8591                 udelay(10);
8592         }
8593 }
8594
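/* Fill the indirection table with the stock round-robin spread;
 * ethtool_rxfh_indir_default(i, n) is simply i % n, so with e.g.
 * irq_cnt == 5 (four rx rings) the table reads 0,1,2,3,0,1,2,3,...
 */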
8595 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8596 {
8597         int i;
8598
8599         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8600                 tp->rss_ind_tbl[i] =
8601                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8602 }
8603
8604 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8605 {
8606         int i;
8607
8608         if (!tg3_flag(tp, SUPPORT_MSIX))
8609                 return;
8610
8611         if (tp->irq_cnt <= 2) {
8612                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8613                 return;
8614         }
8615
8616         /* Validate table against current IRQ count */
8617         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8618                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8619                         break;
8620         }
8621
8622         if (i != TG3_RSS_INDIR_TBL_SIZE)
8623                 tg3_rss_init_dflt_indir_tbl(tp);
8624 }
8625
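/* The hardware packs eight 4-bit table entries into each 32-bit
 * register, first entry in the most significant nibble: entries
 * 1,2,3,0,1,2,3,0 are written as the single value 0x12301230.
 */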
8626 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8627 {
8628         int i = 0;
8629         u32 reg = MAC_RSS_INDIR_TBL_0;
8630
8631         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8632                 u32 val = tp->rss_ind_tbl[i];
8633                 i++;
8634                 for (; i % 8; i++) {
8635                         val <<= 4;
8636                         val |= tp->rss_ind_tbl[i];
8637                 }
8638                 tw32(reg, val);
8639                 reg += 4;
8640         }
8641 }
8642
8643 /* tp->lock is held. */
8644 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8645 {
8646         u32 val, rdmac_mode;
8647         int i, err, limit;
8648         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8649
8650         tg3_disable_ints(tp);
8651
8652         tg3_stop_fw(tp);
8653
8654         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8655
8656         if (tg3_flag(tp, INIT_COMPLETE))
8657                 tg3_abort_hw(tp, 1);
8658
8659         /* Enable MAC control of LPI */
8660         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8661                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8662                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8663                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8664
8665                 tw32_f(TG3_CPMU_EEE_CTRL,
8666                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8667
8668                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8669                       TG3_CPMU_EEEMD_LPI_IN_TX |
8670                       TG3_CPMU_EEEMD_LPI_IN_RX |
8671                       TG3_CPMU_EEEMD_EEE_ENABLE;
8672
8673                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8674                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8675
8676                 if (tg3_flag(tp, ENABLE_APE))
8677                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8678
8679                 tw32_f(TG3_CPMU_EEE_MODE, val);
8680
8681                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8682                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8683                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8684
8685                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8686                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8687                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8688         }
8689
8690         if (reset_phy)
8691                 tg3_phy_reset(tp);
8692
8693         err = tg3_chip_reset(tp);
8694         if (err)
8695                 return err;
8696
8697         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8698
8699         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8700                 val = tr32(TG3_CPMU_CTRL);
8701                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8702                 tw32(TG3_CPMU_CTRL, val);
8703
8704                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8705                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8706                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8707                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8708
8709                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8710                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8711                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8712                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8713
8714                 val = tr32(TG3_CPMU_HST_ACC);
8715                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8716                 val |= CPMU_HST_ACC_MACCLK_6_25;
8717                 tw32(TG3_CPMU_HST_ACC, val);
8718         }
8719
8720         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8721                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8722                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8723                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8724                 tw32(PCIE_PWR_MGMT_THRESH, val);
8725
8726                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8727                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8728
8729                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8730
8731                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8732                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8733         }
8734
8735         if (tg3_flag(tp, L1PLLPD_EN)) {
8736                 u32 grc_mode = tr32(GRC_MODE);
8737
8738                 /* Access the lower 1K of PL PCIE block registers. */
8739                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8740                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8741
8742                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8743                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8744                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8745
8746                 tw32(GRC_MODE, grc_mode);
8747         }
8748
8749         if (tg3_flag(tp, 57765_CLASS)) {
8750                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8751                         u32 grc_mode = tr32(GRC_MODE);
8752
8753                         /* Access the lower 1K of PL PCIE block registers. */
8754                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8755                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8756
8757                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8758                                    TG3_PCIE_PL_LO_PHYCTL5);
8759                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8760                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8761
8762                         tw32(GRC_MODE, grc_mode);
8763                 }
8764
8765                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8766                         u32 grc_mode = tr32(GRC_MODE);
8767
8768                         /* Access the lower 1K of DL PCIE block registers. */
8769                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8770                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8771
8772                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8773                                    TG3_PCIE_DL_LO_FTSMAX);
8774                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8775                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8776                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8777
8778                         tw32(GRC_MODE, grc_mode);
8779                 }
8780
8781                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8782                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8783                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8784                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8785         }
8786
8787         /* This works around an issue with Athlon chipsets on
8788          * B3 tigon3 silicon.  This bit has no effect on any
8789          * other revision.  But do not set this on PCI Express
8790          * chips and don't even touch the clocks if the CPMU is present.
8791          */
8792         if (!tg3_flag(tp, CPMU_PRESENT)) {
8793                 if (!tg3_flag(tp, PCI_EXPRESS))
8794                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8795                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8796         }
8797
8798         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8799             tg3_flag(tp, PCIX_MODE)) {
8800                 val = tr32(TG3PCI_PCISTATE);
8801                 val |= PCISTATE_RETRY_SAME_DMA;
8802                 tw32(TG3PCI_PCISTATE, val);
8803         }
8804
8805         if (tg3_flag(tp, ENABLE_APE)) {
8806                 /* Allow reads and writes to the
8807                  * APE register and memory space.
8808                  */
8809                 val = tr32(TG3PCI_PCISTATE);
8810                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8811                        PCISTATE_ALLOW_APE_SHMEM_WR |
8812                        PCISTATE_ALLOW_APE_PSPACE_WR;
8813                 tw32(TG3PCI_PCISTATE, val);
8814         }
8815
8816         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8817                 /* Enable some hw fixes.  */
8818                 val = tr32(TG3PCI_MSI_DATA);
8819                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8820                 tw32(TG3PCI_MSI_DATA, val);
8821         }
8822
8823         /* Descriptor ring init may make accesses to the
8824          * NIC SRAM area to setup the TX descriptors, so we
8825          * can only do this after the hardware has been
8826          * successfully reset.
8827          */
8828         err = tg3_init_rings(tp);
8829         if (err)
8830                 return err;
8831
8832         if (tg3_flag(tp, 57765_PLUS)) {
8833                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8834                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8835                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8836                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8837                 if (!tg3_flag(tp, 57765_CLASS) &&
8838                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8839                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8840                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8841         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8842                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8843                 /* This value is determined during the probe time DMA
8844                  * engine test, tg3_test_dma.
8845                  */
8846                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8847         }
8848
8849         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8850                           GRC_MODE_4X_NIC_SEND_RINGS |
8851                           GRC_MODE_NO_TX_PHDR_CSUM |
8852                           GRC_MODE_NO_RX_PHDR_CSUM);
8853         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8854
8855         /* Pseudo-header checksum is done by hardware logic and not
8856          * the offload processors, so make the chip do the pseudo-
8857          * header checksums on receive.  For transmit it is more
8858          * convenient to do the pseudo-header checksum in software
8859          * as Linux does that on transmit for us in all cases.
8860          */
8861         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8862
8863         tw32(GRC_MODE,
8864              tp->grc_mode |
8865              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8866
8867         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
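         /* A prescale of 65 divides the clock by 65 + 1 = 66, which
          * should yield a 1 MHz (1 usec) tick -- inferred from the
          * arithmetic rather than from the data sheet.
          */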
8868         val = tr32(GRC_MISC_CFG);
8869         val &= ~0xff;
8870         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8871         tw32(GRC_MISC_CFG, val);
8872
8873         /* Initialize MBUF/DESC pool. */
8874         if (tg3_flag(tp, 5750_PLUS)) {
8875                 /* Do nothing.  */
8876         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8877                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8878                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8879                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8880                 else
8881                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8882                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8883                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8884         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8885                 int fw_len;
8886
8887                 fw_len = tp->fw_len;
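                 /* Round the firmware length up to the next 128-byte
                  * boundary before carving it out of the mbuf pool.
                  */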
8888                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8889                 tw32(BUFMGR_MB_POOL_ADDR,
8890                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8891                 tw32(BUFMGR_MB_POOL_SIZE,
8892                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8893         }
8894
8895         if (tp->dev->mtu <= ETH_DATA_LEN) {
8896                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8897                      tp->bufmgr_config.mbuf_read_dma_low_water);
8898                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8899                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8900                 tw32(BUFMGR_MB_HIGH_WATER,
8901                      tp->bufmgr_config.mbuf_high_water);
8902         } else {
8903                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8904                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8905                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8906                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8907                 tw32(BUFMGR_MB_HIGH_WATER,
8908                      tp->bufmgr_config.mbuf_high_water_jumbo);
8909         }
8910         tw32(BUFMGR_DMA_LOW_WATER,
8911              tp->bufmgr_config.dma_low_water);
8912         tw32(BUFMGR_DMA_HIGH_WATER,
8913              tp->bufmgr_config.dma_high_water);
8914
8915         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8916         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8917                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8918         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8919             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8920             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8921                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8922         tw32(BUFMGR_MODE, val);
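         /* Poll for the buffer manager to come up: 2000 iterations of
          * udelay(10) bounds the wait at roughly 20 ms before we give
          * up and declare the device dead.
          */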
8923         for (i = 0; i < 2000; i++) {
8924                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8925                         break;
8926                 udelay(10);
8927         }
8928         if (i >= 2000) {
8929                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8930                 return -ENODEV;
8931         }
8932
8933         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8934                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8935
8936         tg3_setup_rxbd_thresholds(tp);
8937
8938         /* Initialize TG3_BDINFO's at:
8939          *  RCVDBDI_STD_BD:     standard eth size rx ring
8940          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8941          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8942          *
8943          * like so:
8944          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8945          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8946          *                              ring attribute flags
8947          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8948          *
8949          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8950          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8951          *
8952          * The size of each ring is fixed in the firmware, but the location is
8953          * configurable.
8954          */
8955         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8956              ((u64) tpr->rx_std_mapping >> 32));
8957         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8958              ((u64) tpr->rx_std_mapping & 0xffffffff));
8959         if (!tg3_flag(tp, 5717_PLUS))
8960                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8961                      NIC_SRAM_RX_BUFFER_DESC);
8962
8963         /* Disable the mini ring */
8964         if (!tg3_flag(tp, 5705_PLUS))
8965                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8966                      BDINFO_FLAGS_DISABLED);
8967
8968         /* Program the jumbo buffer descriptor ring control
8969          * blocks on those devices that have them.
8970          */
8971         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8972             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8973
8974                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8975                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR +
                                  TG3_64BIT_REG_HIGH,
8976                              ((u64) tpr->rx_jmb_mapping >> 32));
8977                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR +
                                  TG3_64BIT_REG_LOW,
8978                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8979                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8980                               BDINFO_FLAGS_MAXLEN_SHIFT;
8981                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8982                              val | BDINFO_FLAGS_USE_EXT_RECV);
8983                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8984                             tg3_flag(tp, 57765_CLASS))
8985                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8986                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8987                 } else {
8988                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8989                              BDINFO_FLAGS_DISABLED);
8990                 }
8991
8992                 if (tg3_flag(tp, 57765_PLUS)) {
8993                         val = TG3_RX_STD_RING_SIZE(tp);
8994                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8995                         val |= (TG3_RX_STD_DMA_SZ << 2);
8996                 } else
8997                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8998         } else
8999                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9000
9001         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9002
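         /* Prime the producer mailboxes with the number of rx buffers
          * tg3_init_rings() posted so the chip knows they are ready.
          */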
9003         tpr->rx_std_prod_idx = tp->rx_pending;
9004         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9005
9006         tpr->rx_jmb_prod_idx =
9007                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9008         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9009
9010         tg3_rings_reset(tp);
9011
9012         /* Initialize MAC address and backoff seed. */
9013         __tg3_set_mac_addr(tp, 0);
9014
9015         /* MTU + ethernet header + FCS + optional VLAN tag */
9016         tw32(MAC_RX_MTU_SIZE,
9017              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9018
9019         /* The slot time is changed by tg3_setup_phy if we
9020          * run at gigabit with half duplex.
9021          */
9022         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9023               (6 << TX_LENGTHS_IPG_SHIFT) |
9024               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9025
9026         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9027                 val |= tr32(MAC_TX_LENGTHS) &
9028                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9029                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9030
9031         tw32(MAC_TX_LENGTHS, val);
9032
9033         /* Receive rules. */
9034         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9035         tw32(RCVLPC_CONFIG, 0x0181);
9036
9037         /* Calculate the RDMAC_MODE setting early; we need it to determine
9038          * the RCVLPC_STATE_ENABLE mask.
9039          */
9040         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9041                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9042                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9043                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9044                       RDMAC_MODE_LNGREAD_ENAB);
9045
9046         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9047                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9048
9049         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9050             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9051             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9052                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9053                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9054                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9055
9056         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9057             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9058                 if (tg3_flag(tp, TSO_CAPABLE) &&
9059                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9060                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9061                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9062                            !tg3_flag(tp, IS_5788)) {
9063                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9064                 }
9065         }
9066
9067         if (tg3_flag(tp, PCI_EXPRESS))
9068                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9069
9070         if (tg3_flag(tp, HW_TSO_1) ||
9071             tg3_flag(tp, HW_TSO_2) ||
9072             tg3_flag(tp, HW_TSO_3))
9073                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9074
9075         if (tg3_flag(tp, 57765_PLUS) ||
9076             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9077             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9078                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9079
9080         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9081                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9082
9083         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9084             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9085             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9086             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9087             tg3_flag(tp, 57765_PLUS)) {
9088                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9089                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
9090                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9091                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9092                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9093                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9094                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9095                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9096                 }
9097                 tw32(TG3_RDMA_RSRVCTRL_REG,
9098                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9099         }
9100
9101         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9102             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9103                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9104                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9105                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9106                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9107         }
9108
9109         /* Receive/send statistics. */
9110         if (tg3_flag(tp, 5750_PLUS)) {
9111                 val = tr32(RCVLPC_STATS_ENABLE);
9112                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9113                 tw32(RCVLPC_STATS_ENABLE, val);
9114         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9115                    tg3_flag(tp, TSO_CAPABLE)) {
9116                 val = tr32(RCVLPC_STATS_ENABLE);
9117                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9118                 tw32(RCVLPC_STATS_ENABLE, val);
9119         } else {
9120                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9121         }
9122         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9123         tw32(SNDDATAI_STATSENAB, 0xffffff);
9124         tw32(SNDDATAI_STATSCTRL,
9125              (SNDDATAI_SCTRL_ENABLE |
9126               SNDDATAI_SCTRL_FASTUPD));
9127
9128         /* Setup host coalescing engine. */
9129         tw32(HOSTCC_MODE, 0);
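         /* Writing 0 stops the engine; poll up to ~20 ms (2000 * 10
          * usec) for the ENABLE bit to clear before reprogramming.
          */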
9130         for (i = 0; i < 2000; i++) {
9131                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9132                         break;
9133                 udelay(10);
9134         }
9135
9136         __tg3_set_coalesce(tp, &tp->coal);
9137
9138         if (!tg3_flag(tp, 5705_PLUS)) {
9139                 /* Status/statistics block address.  See tg3_timer,
9140                  * the tg3_periodic_fetch_stats call there, and
9141                  * tg3_get_stats to see how this works for 5705/5750 chips.
9142                  */
9143                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9144                      ((u64) tp->stats_mapping >> 32));
9145                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9146                      ((u64) tp->stats_mapping & 0xffffffff));
9147                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9148
9149                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9150
9151                 /* Clear statistics and status block memory areas */
9152                 for (i = NIC_SRAM_STATS_BLK;
9153                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9154                      i += sizeof(u32)) {
9155                         tg3_write_mem(tp, i, 0);
9156                         udelay(40);
9157                 }
9158         }
9159
9160         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9161
9162         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9163         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9164         if (!tg3_flag(tp, 5705_PLUS))
9165                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9166
9167         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9168                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9169                 /* reset to prevent losing 1st rx packet intermittently */
9170                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9171                 udelay(10);
9172         }
9173
9174         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9175                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9176                         MAC_MODE_FHDE_ENABLE;
9177         if (tg3_flag(tp, ENABLE_APE))
9178                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9179         if (!tg3_flag(tp, 5705_PLUS) &&
9180             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9181             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9182                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9183         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR |
                    MAC_MODE_TXSTAT_CLEAR);
9184         udelay(40);
9185
9186         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9187          * If TG3_FLAG_IS_NIC is zero, we should read the
9188          * register to preserve the GPIO settings for LOMs. The GPIOs,
9189          * whether used as inputs or outputs, are set by boot code after
9190          * reset.
9191          */
9192         if (!tg3_flag(tp, IS_NIC)) {
9193                 u32 gpio_mask;
9194
9195                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9196                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9197                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9198
9199                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9200                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9201                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9202
9203                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9204                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9205
9206                 tp->grc_local_ctrl &= ~gpio_mask;
9207                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9208
9209                 /* GPIO1 must be driven high for eeprom write protect */
9210                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9211                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9212                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9213         }
9214         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9215         udelay(100);
9216
9217         if (tg3_flag(tp, USING_MSIX)) {
9218                 val = tr32(MSGINT_MODE);
9219                 val |= MSGINT_MODE_ENABLE;
9220                 if (tp->irq_cnt > 1)
9221                         val |= MSGINT_MODE_MULTIVEC_EN;
9222                 if (!tg3_flag(tp, 1SHOT_MSI))
9223                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9224                 tw32(MSGINT_MODE, val);
9225         }
9226
9227         if (!tg3_flag(tp, 5705_PLUS)) {
9228                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9229                 udelay(40);
9230         }
9231
9232         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9233                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9234                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9235                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9236                WDMAC_MODE_LNGREAD_ENAB);
9237
9238         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9239             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9240                 if (tg3_flag(tp, TSO_CAPABLE) &&
9241                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9242                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9243                         /* nothing */
9244                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9245                            !tg3_flag(tp, IS_5788)) {
9246                         val |= WDMAC_MODE_RX_ACCEL;
9247                 }
9248         }
9249
9250         /* Enable host coalescing bug fix */
9251         if (tg3_flag(tp, 5755_PLUS))
9252                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9253
9254         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9255                 val |= WDMAC_MODE_BURST_ALL_DATA;
9256
9257         tw32_f(WDMAC_MODE, val);
9258         udelay(40);
9259
9260         if (tg3_flag(tp, PCIX_MODE)) {
9261                 u16 pcix_cmd;
9262
9263                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9264                                      &pcix_cmd);
9265                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9266                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9267                         pcix_cmd |= PCI_X_CMD_READ_2K;
9268                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9269                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9270                         pcix_cmd |= PCI_X_CMD_READ_2K;
9271                 }
9272                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9273                                       pcix_cmd);
9274         }
9275
9276         tw32_f(RDMAC_MODE, rdmac_mode);
9277         udelay(40);
9278
9279         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9280         if (!tg3_flag(tp, 5705_PLUS))
9281                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9282
9283         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9284                 tw32(SNDDATAC_MODE,
9285                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9286         else
9287                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9288
9289         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9290         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9291         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9292         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9293                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9294         tw32(RCVDBDI_MODE, val);
9295         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9296         if (tg3_flag(tp, HW_TSO_1) ||
9297             tg3_flag(tp, HW_TSO_2) ||
9298             tg3_flag(tp, HW_TSO_3))
9299                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9300         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9301         if (tg3_flag(tp, ENABLE_TSS))
9302                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9303         tw32(SNDBDI_MODE, val);
9304         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9305
9306         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9307                 err = tg3_load_5701_a0_firmware_fix(tp);
9308                 if (err)
9309                         return err;
9310         }
9311
9312         if (tg3_flag(tp, TSO_CAPABLE)) {
9313                 err = tg3_load_tso_firmware(tp);
9314                 if (err)
9315                         return err;
9316         }
9317
9318         tp->tx_mode = TX_MODE_ENABLE;
9319
9320         if (tg3_flag(tp, 5755_PLUS) ||
9321             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9322                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9323
9324         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9325                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9326                 tp->tx_mode &= ~val;
9327                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9328         }
9329
9330         tw32_f(MAC_TX_MODE, tp->tx_mode);
9331         udelay(100);
9332
9333         if (tg3_flag(tp, ENABLE_RSS)) {
9334                 tg3_rss_write_indir_tbl(tp);
9335
9336                 /* Setup the "secret" hash key. */
9337                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9338                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9339                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9340                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9341                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9342                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9343                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9344                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9345                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9346                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9347         }
9348
9349         tp->rx_mode = RX_MODE_ENABLE;
9350         if (tg3_flag(tp, 5755_PLUS))
9351                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9352
9353         if (tg3_flag(tp, ENABLE_RSS))
9354                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9355                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9356                                RX_MODE_RSS_IPV6_HASH_EN |
9357                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9358                                RX_MODE_RSS_IPV4_HASH_EN |
9359                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9360
9361         tw32_f(MAC_RX_MODE, tp->rx_mode);
9362         udelay(10);
9363
9364         tw32(MAC_LED_CTRL, tp->led_ctrl);
9365
9366         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9367         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9368                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9369                 udelay(10);
9370         }
9371         tw32_f(MAC_RX_MODE, tp->rx_mode);
9372         udelay(10);
9373
9374         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9375                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9376                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9377                         /* Set the drive transmission level to 1.2V, but
9378                          * only if the signal pre-emphasis bit is not set. */
9379                         val = tr32(MAC_SERDES_CFG);
9380                         val &= 0xfffff000;
9381                         val |= 0x880;
9382                         tw32(MAC_SERDES_CFG, val);
9383                 }
9384                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9385                         tw32(MAC_SERDES_CFG, 0x616000);
9386         }
9387
9388         /* Prevent chip from dropping frames when flow control
9389          * is enabled.
9390          */
9391         if (tg3_flag(tp, 57765_CLASS))
9392                 val = 1;
9393         else
9394                 val = 2;
9395         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9396
9397         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9398             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9399                 /* Use hardware link auto-negotiation */
9400                 tg3_flag_set(tp, HW_AUTONEG);
9401         }
9402
9403         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9404             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9405                 u32 tmp;
9406
9407                 tmp = tr32(SERDES_RX_CTRL);
9408                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9409                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9410                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9411                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9412         }
9413
9414         if (!tg3_flag(tp, USE_PHYLIB)) {
9415                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9416                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9417
9418                 err = tg3_setup_phy(tp, 0);
9419                 if (err)
9420                         return err;
9421
9422                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9423                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9424                         u32 tmp;
9425
9426                         /* Clear CRC stats. */
9427                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9428                                 tg3_writephy(tp, MII_TG3_TEST1,
9429                                              tmp | MII_TG3_TEST1_CRC_EN);
9430                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9431                         }
9432                 }
9433         }
9434
9435         __tg3_set_rx_mode(tp->dev);
9436
9437         /* Initialize receive rules. */
9438         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9439         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9440         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9441         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9442
9443         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9444                 limit = 8;
9445         else
9446                 limit = 16;
9447         if (tg3_flag(tp, ENABLE_ASF))
9448                 limit -= 4;
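         /* Every case below deliberately falls through: starting at
          * the first rule past the usable limit, rules are cleared
          * down to rule 4.  Rules 2 and 3 are intentionally left alone
          * (note the commented-out writes) and rules 0 and 1 were
          * programmed above.
          */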
9449         switch (limit) {
9450         case 16:
9451                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9452         case 15:
9453                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9454         case 14:
9455                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9456         case 13:
9457                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9458         case 12:
9459                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9460         case 11:
9461                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9462         case 10:
9463                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9464         case 9:
9465                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9466         case 8:
9467                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9468         case 7:
9469                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9470         case 6:
9471                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9472         case 5:
9473                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9474         case 4:
9475                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9476         case 3:
9477                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9478         case 2:
9479         case 1:
9480
9481         default:
9482                 break;
9483         }
9484
9485         if (tg3_flag(tp, ENABLE_APE))
9486                 /* Write our heartbeat update interval to APE. */
9487                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9488                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9489
9490         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9491
9492         return 0;
9493 }
9494
9495 /* Called at device open time to get the chip ready for
9496  * packet processing.  Invoked with tp->lock held.
9497  */
9498 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9499 {
9500         tg3_switch_clocks(tp);
9501
9502         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9503
9504         return tg3_reset_hw(tp, reset_phy);
9505 }
9506
9507 #if IS_ENABLED(CONFIG_HWMON)
9508 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9509 {
9510         int i;
9511
9512         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9513                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9514
9515                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9516                 off += len;
9517
9518                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9519                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9520                         memset(ocir, 0, TG3_OCIR_LEN);
9521         }
9522 }
9523
9524 /* sysfs attributes for hwmon */
9525 static ssize_t tg3_show_temp(struct device *dev,
9526                              struct device_attribute *devattr, char *buf)
9527 {
9528         struct pci_dev *pdev = to_pci_dev(dev);
9529         struct net_device *netdev = pci_get_drvdata(pdev);
9530         struct tg3 *tp = netdev_priv(netdev);
9531         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9532         u32 temperature;
9533
9534         spin_lock_bh(&tp->lock);
9535         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9536                                 sizeof(temperature));
9537         spin_unlock_bh(&tp->lock);
             /* The hwmon ABI reports temp*_input in millidegrees
              * Celsius, while the APE supplies whole degrees, so
              * scale the value.
              */
9538         return sprintf(buf, "%u\n", temperature * 1000);
9539 }
9540
9542 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9543                           TG3_TEMP_SENSOR_OFFSET);
9544 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9545                           TG3_TEMP_CAUTION_OFFSET);
9546 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9547                           TG3_TEMP_MAX_OFFSET);
9548
9549 static struct attribute *tg3_attributes[] = {
9550         &sensor_dev_attr_temp1_input.dev_attr.attr,
9551         &sensor_dev_attr_temp1_crit.dev_attr.attr,
9552         &sensor_dev_attr_temp1_max.dev_attr.attr,
9553         NULL
9554 };
9555
9556 static const struct attribute_group tg3_group = {
9557         .attrs = tg3_attributes,
9558 };
9559
9560 #endif
9561
9562 static void tg3_hwmon_close(struct tg3 *tp)
9563 {
9564 #if IS_ENABLED(CONFIG_HWMON)
9565         if (tp->hwmon_dev) {
9566                 hwmon_device_unregister(tp->hwmon_dev);
9567                 tp->hwmon_dev = NULL;
9568                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9569         }
9570 #endif
9571 }
9572
9573 static void tg3_hwmon_open(struct tg3 *tp)
9574 {
9575 #if IS_ENABLED(CONFIG_HWMON)
9576         int i, err;
9577         u32 size = 0;
9578         struct pci_dev *pdev = tp->pdev;
9579         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9580
9581         tg3_sd_scan_scratchpad(tp, ocirs);
9582
9583         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9584                 if (!ocirs[i].src_data_length)
9585                         continue;
9586
9587                 size += ocirs[i].src_hdr_length;
9588                 size += ocirs[i].src_data_length;
9589         }
9590
9591         if (!size)
9592                 return;
9593
9594         /* Register hwmon sysfs hooks */
9595         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9596         if (err) {
9597                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9598                 return;
9599         }
9600
9601         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9602         if (IS_ERR(tp->hwmon_dev)) {
9603                 tp->hwmon_dev = NULL;
9604                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9605                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9606         }
9607 #endif
9608 }
9609
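/* Accumulate a 32-bit hardware counter into a 64-bit software one.
 * After the unsigned addition, (PSTAT)->low < __val exactly when the
 * low word wrapped, in which case a carry is pushed into ->high.
 */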
9611 #define TG3_STAT_ADD32(PSTAT, REG) \
9612 do {    u32 __val = tr32(REG); \
9613         (PSTAT)->low += __val; \
9614         if ((PSTAT)->low < __val) \
9615                 (PSTAT)->high += 1; \
9616 } while (0)
9617
9618 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9619 {
9620         struct tg3_hw_stats *sp = tp->hw_stats;
9621
9622         if (!netif_carrier_ok(tp->dev))
9623                 return;
9624
9625         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9626         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9627         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9628         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9629         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9630         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9631         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9632         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9633         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9634         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9635         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9636         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9637         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9638
9639         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9640         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9641         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9642         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9643         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9644         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9645         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9646         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9647         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9648         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9649         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9650         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9651         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9652         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9653
9654         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9655         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9656             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9657             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9658                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9659         } else {
9660                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9661                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9662                 if (val) {
9663                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9664                         sp->rx_discards.low += val;
9665                         if (sp->rx_discards.low < val)
9666                                 sp->rx_discards.high += 1;
9667                 }
9668                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9669         }
9670         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9671 }
9672
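/* Work around occasionally lost MSIs: if a vector still has work
 * pending but its consumer indices have not moved since the previous
 * timer tick, assume the interrupt was dropped and, after one grace
 * tick, invoke the handler by hand via tg3_msi().
 */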
9673 static void tg3_chk_missed_msi(struct tg3 *tp)
9674 {
9675         u32 i;
9676
9677         for (i = 0; i < tp->irq_cnt; i++) {
9678                 struct tg3_napi *tnapi = &tp->napi[i];
9679
9680                 if (tg3_has_work(tnapi)) {
9681                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9682                             tnapi->last_tx_cons == tnapi->tx_cons) {
9683                                 if (tnapi->chk_msi_cnt < 1) {
9684                                         tnapi->chk_msi_cnt++;
9685                                         return;
9686                                 }
9687                                 tg3_msi(0, tnapi);
9688                         }
9689                 }
9690                 tnapi->chk_msi_cnt = 0;
9691                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9692                 tnapi->last_tx_cons = tnapi->tx_cons;
9693         }
9694 }
9695
9696 static void tg3_timer(unsigned long __opaque)
9697 {
9698         struct tg3 *tp = (struct tg3 *) __opaque;
9699
9700         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9701                 goto restart_timer;
9702
9703         spin_lock(&tp->lock);
9704
9705         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9706             tg3_flag(tp, 57765_CLASS))
9707                 tg3_chk_missed_msi(tp);
9708
9709         if (!tg3_flag(tp, TAGGED_STATUS)) {
9710                 /* All of this garbage is because, when using non-tagged
9711                  * IRQ status, the mailbox/status_block protocol the chip
9712                  * uses with the CPU is race prone.
9713                  */
9714                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9715                         tw32(GRC_LOCAL_CTRL,
9716                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9717                 } else {
9718                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9719                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9720                 }
9721
9722                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9723                         spin_unlock(&tp->lock);
9724                         tg3_reset_task_schedule(tp);
9725                         goto restart_timer;
9726                 }
9727         }
9728
9729         /* This part only runs once per second. */
9730         if (!--tp->timer_counter) {
9731                 if (tg3_flag(tp, 5705_PLUS))
9732                         tg3_periodic_fetch_stats(tp);
9733
9734                 if (tp->setlpicnt && !--tp->setlpicnt)
9735                         tg3_phy_eee_enable(tp);
9736
9737                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9738                         u32 mac_stat;
9739                         int phy_event;
9740
9741                         mac_stat = tr32(MAC_STATUS);
9742
9743                         phy_event = 0;
9744                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9745                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9746                                         phy_event = 1;
9747                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9748                                 phy_event = 1;
9749
9750                         if (phy_event)
9751                                 tg3_setup_phy(tp, 0);
9752                 } else if (tg3_flag(tp, POLL_SERDES)) {
9753                         u32 mac_stat = tr32(MAC_STATUS);
9754                         int need_setup = 0;
9755
9756                         if (netif_carrier_ok(tp->dev) &&
9757                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9758                                 need_setup = 1;
9759                         }
9760                         if (!netif_carrier_ok(tp->dev) &&
9761                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9762                                          MAC_STATUS_SIGNAL_DET))) {
9763                                 need_setup = 1;
9764                         }
9765                         if (need_setup) {
9766                                 if (!tp->serdes_counter) {
9767                                         tw32_f(MAC_MODE,
9768                                              (tp->mac_mode &
9769                                               ~MAC_MODE_PORT_MODE_MASK));
9770                                         udelay(40);
9771                                         tw32_f(MAC_MODE, tp->mac_mode);
9772                                         udelay(40);
9773                                 }
9774                                 tg3_setup_phy(tp, 0);
9775                         }
9776                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9777                            tg3_flag(tp, 5780_CLASS)) {
9778                         tg3_serdes_parallel_detect(tp);
9779                 }
9780
9781                 tp->timer_counter = tp->timer_multiplier;
9782         }
9783
9784         /* Heartbeat is only sent once every 2 seconds.
9785          *
9786          * The heartbeat is to tell the ASF firmware that the host
9787          * driver is still alive.  In the event that the OS crashes,
9788          * ASF needs to reset the hardware to free up the FIFO space
9789          * that may be filled with rx packets destined for the host.
9790          * If the FIFO is full, ASF will no longer function properly.
9791          *
9792          * Unintended resets have been reported on real time kernels
9793          * where the timer doesn't run on time.  Netpoll will also have
9794          * the same problem.
9795          *
9796          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9797          * to check the ring condition when the heartbeat is expiring
9798          * before doing the reset.  This will prevent most unintended
9799          * resets.
9800          */
9801         if (!--tp->asf_counter) {
9802                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9803                         tg3_wait_for_event_ack(tp);
9804
9805                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9806                                       FWCMD_NICDRV_ALIVE3);
9807                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9808                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9809                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9810
9811                         tg3_generate_fw_event(tp);
9812                 }
9813                 tp->asf_counter = tp->asf_multiplier;
9814         }
9815
9816         spin_unlock(&tp->lock);
9817
9818 restart_timer:
9819         tp->timer.expires = jiffies + tp->timer_offset;
9820         add_timer(&tp->timer);
9821 }
9822
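/* Chips with tagged status can get by with a slow 1 Hz poll, except
 * for the 5717 and 57765 families, which are polled at 10 Hz,
 * presumably so the missed-MSI check above runs often enough.
 */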
9823 static void __devinit tg3_timer_init(struct tg3 *tp)
9824 {
9825         if (tg3_flag(tp, TAGGED_STATUS) &&
9826             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9827             !tg3_flag(tp, 57765_CLASS))
9828                 tp->timer_offset = HZ;
9829         else
9830                 tp->timer_offset = HZ / 10;
9831
9832         BUG_ON(tp->timer_offset > HZ);
9833
9834         tp->timer_multiplier = (HZ / tp->timer_offset);
9835         tp->asf_multiplier = (HZ / tp->timer_offset) *
9836                              TG3_FW_UPDATE_FREQ_SEC;
9837
9838         init_timer(&tp->timer);
9839         tp->timer.data = (unsigned long) tp;
9840         tp->timer.function = tg3_timer;
9841 }
9842
9843 static void tg3_timer_start(struct tg3 *tp)
9844 {
9845         tp->asf_counter   = tp->asf_multiplier;
9846         tp->timer_counter = tp->timer_multiplier;
9847
9848         tp->timer.expires = jiffies + tp->timer_offset;
9849         add_timer(&tp->timer);
9850 }
9851
9852 static void tg3_timer_stop(struct tg3 *tp)
9853 {
9854         del_timer_sync(&tp->timer);
9855 }
9856
9857 /* Restart hardware after configuration changes, self-test, etc.
9858  * Invoked with tp->lock held.
9859  */
9860 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9861         __releases(tp->lock)
9862         __acquires(tp->lock)
9863 {
9864         int err;
9865
9866         err = tg3_init_hw(tp, reset_phy);
9867         if (err) {
9868                 netdev_err(tp->dev,
9869                            "Failed to re-initialize device, aborting\n");
9870                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9871                 tg3_full_unlock(tp);
9872                 tg3_timer_stop(tp);
9873                 tp->irq_sync = 0;
9874                 tg3_napi_enable(tp);
9875                 dev_close(tp->dev);
9876                 tg3_full_lock(tp, 0);
9877         }
9878         return err;
9879 }
9880
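/* Deferred reset handler, run from the shared workqueue whenever
 * tg3_reset_task_schedule() decides the chip needs to be
 * reinitialized (e.g. a stalled write DMA engine or a tx recovery).
 * Quiesces the PHY and netif queues, halts and re-inits the hardware,
 * then restarts everything.
 */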
9881 static void tg3_reset_task(struct work_struct *work)
9882 {
9883         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9884         int err;
9885
9886         tg3_full_lock(tp, 0);
9887
9888         if (!netif_running(tp->dev)) {
9889                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9890                 tg3_full_unlock(tp);
9891                 return;
9892         }
9893
9894         tg3_full_unlock(tp);
9895
9896         tg3_phy_stop(tp);
9897
9898         tg3_netif_stop(tp);
9899
9900         tg3_full_lock(tp, 1);
9901
9902         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9903                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9904                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9905                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9906                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9907         }
9908
9909         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9910         err = tg3_init_hw(tp, 1);
9911         if (err)
9912                 goto out;
9913
9914         tg3_netif_start(tp);
9915
9916 out:
9917         tg3_full_unlock(tp);
9918
9919         if (!err)
9920                 tg3_phy_start(tp);
9921
9922         tg3_flag_clear(tp, RESET_TASK_PENDING);
9923 }
9924
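/* Install the handler for one interrupt vector: the (optionally
 * one-shot) MSI handler when MSI/MSI-X is in use, otherwise the
 * legacy INTx handler (tagged-status variant where supported) with
 * IRQF_SHARED.
 */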
9925 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9926 {
9927         irq_handler_t fn;
9928         unsigned long flags;
9929         char *name;
9930         struct tg3_napi *tnapi = &tp->napi[irq_num];
9931
9932         if (tp->irq_cnt == 1)
9933                 name = tp->dev->name;
9934         else {
9935                 name = &tnapi->irq_lbl[0];
9936                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9937                 name[IFNAMSIZ-1] = 0;
9938         }
9939
9940         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9941                 fn = tg3_msi;
9942                 if (tg3_flag(tp, 1SHOT_MSI))
9943                         fn = tg3_msi_1shot;
9944                 flags = 0;
9945         } else {
9946                 fn = tg3_interrupt;
9947                 if (tg3_flag(tp, TAGGED_STATUS))
9948                         fn = tg3_interrupt_tagged;
9949                 flags = IRQF_SHARED;
9950         }
9951
9952         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9953 }
9954
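/* Check that the device can really deliver an interrupt: temporarily
 * install a test ISR, force a "coalesce now" event, and poll the
 * interrupt mailbox for up to ~50ms for evidence that the interrupt
 * fired.
 */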
9955 static int tg3_test_interrupt(struct tg3 *tp)
9956 {
9957         struct tg3_napi *tnapi = &tp->napi[0];
9958         struct net_device *dev = tp->dev;
9959         int err, i, intr_ok = 0;
9960         u32 val;
9961
9962         if (!netif_running(dev))
9963                 return -ENODEV;
9964
9965         tg3_disable_ints(tp);
9966
9967         free_irq(tnapi->irq_vec, tnapi);
9968
9969         /*
9970          * Turn off MSI one-shot mode.  Otherwise this test has no
9971          * observable way to tell whether the interrupt was delivered.
9972          */
9973         if (tg3_flag(tp, 57765_PLUS)) {
9974                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9975                 tw32(MSGINT_MODE, val);
9976         }
9977
9978         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9979                           IRQF_SHARED, dev->name, tnapi);
9980         if (err)
9981                 return err;
9982
9983         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9984         tg3_enable_ints(tp);
9985
9986         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9987                tnapi->coal_now);
9988
9989         for (i = 0; i < 5; i++) {
9990                 u32 int_mbox, misc_host_ctrl;
9991
9992                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9993                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9994
9995                 if ((int_mbox != 0) ||
9996                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9997                         intr_ok = 1;
9998                         break;
9999                 }
10000
10001                 if (tg3_flag(tp, 57765_PLUS) &&
10002                     tnapi->hw_status->status_tag != tnapi->last_tag)
10003                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10004
10005                 msleep(10);
10006         }
10007
10008         tg3_disable_ints(tp);
10009
10010         free_irq(tnapi->irq_vec, tnapi);
10011
10012         err = tg3_request_irq(tp, 0);
10013
10014         if (err)
10015                 return err;
10016
10017         if (intr_ok) {
10018                 /* Re-enable MSI one-shot mode. */
10019                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10020                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10021                         tw32(MSGINT_MODE, val);
10022                 }
10023                 return 0;
10024         }
10025
10026         return -EIO;
10027 }
10028
10029 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
10030  * successfully restored.
10031  */
10032 static int tg3_test_msi(struct tg3 *tp)
10033 {
10034         int err;
10035         u16 pci_cmd;
10036
10037         if (!tg3_flag(tp, USING_MSI))
10038                 return 0;
10039
10040         /* Turn off SERR reporting in case MSI terminates with Master
10041          * Abort.
10042          */
10043         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10044         pci_write_config_word(tp->pdev, PCI_COMMAND,
10045                               pci_cmd & ~PCI_COMMAND_SERR);
10046
10047         err = tg3_test_interrupt(tp);
10048
10049         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10050
10051         if (!err)
10052                 return 0;
10053
10054         /* other failures */
10055         if (err != -EIO)
10056                 return err;
10057
10058         /* MSI test failed, go back to INTx mode */
10059         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10060                     "to INTx mode. Please report this failure to the PCI "
10061                     "maintainer and include system chipset information\n");
10062
10063         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10064
10065         pci_disable_msi(tp->pdev);
10066
10067         tg3_flag_clear(tp, USING_MSI);
10068         tp->napi[0].irq_vec = tp->pdev->irq;
10069
10070         err = tg3_request_irq(tp, 0);
10071         if (err)
10072                 return err;
10073
10074         /* Need to reset the chip because the MSI cycle may have terminated
10075          * with Master Abort.
10076          */
10077         tg3_full_lock(tp, 1);
10078
10079         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10080         err = tg3_init_hw(tp, 1);
10081
10082         tg3_full_unlock(tp);
10083
10084         if (err)
10085                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10086
10087         return err;
10088 }
10089
10090 static int tg3_request_firmware(struct tg3 *tp)
10091 {
10092         const __be32 *fw_data;
10093
10094         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10095                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10096                            tp->fw_needed);
10097                 return -ENOENT;
10098         }
10099
10100         fw_data = (void *)tp->fw->data;
10101
10102         /* Firmware blob starts with version numbers, followed by
10103          * start address and _full_ length including BSS sections
10104          * (which must be longer than the actual data, of course).
10105          */
10106
10107         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10108         if (tp->fw_len < (tp->fw->size - 12)) {
10109                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10110                            tp->fw_len, tp->fw_needed);
10111                 release_firmware(tp->fw);
10112                 tp->fw = NULL;
10113                 return -EINVAL;
10114         }
10115
10116         /* We no longer need firmware; we have it. */
10117         tp->fw_needed = NULL;
10118         return 0;
10119 }
10120
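/* Try to enable MSI-X with one vector per rx queue plus one for link
 * and other slow-path interrupts.  If the host grants fewer vectors
 * than requested, retry with the granted count; returns false if
 * MSI-X cannot be used at all.
 */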
10121 static bool tg3_enable_msix(struct tg3 *tp)
10122 {
10123         int i, rc;
10124         struct msix_entry msix_ent[tp->irq_max];
10125
10126         tp->irq_cnt = netif_get_num_default_rss_queues();
10127         if (tp->irq_cnt > 1) {
10128                 /* We want as many rx rings enabled as there are cpus.
10129                  * In multiqueue MSI-X mode, the first MSI-X vector
10130                  * only deals with link interrupts, etc, so we add
10131                  * one to the number of vectors we are requesting.
10132                  */
10133                 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
10134         }
10135
10136         for (i = 0; i < tp->irq_max; i++) {
10137                 msix_ent[i].entry  = i;
10138                 msix_ent[i].vector = 0;
10139         }
10140
10141         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10142         if (rc < 0) {
10143                 return false;
10144         } else if (rc != 0) {
10145                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10146                         return false;
10147                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10148                               tp->irq_cnt, rc);
10149                 tp->irq_cnt = rc;
10150         }
10151
10152         for (i = 0; i < tp->irq_max; i++)
10153                 tp->napi[i].irq_vec = msix_ent[i].vector;
10154
10155         netif_set_real_num_tx_queues(tp->dev, 1);
10156         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
10157         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
10158                 pci_disable_msix(tp->pdev);
10159                 return false;
10160         }
10161
10162         if (tp->irq_cnt > 1) {
10163                 tg3_flag_set(tp, ENABLE_RSS);
10164
10165                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
10166                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
10167                         tg3_flag_set(tp, ENABLE_TSS);
10168                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
10169                 }
10170         }
10171
10172         return true;
10173 }
10174
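/* Select the interrupt scheme, in order of preference: MSI-X, MSI,
 * then legacy INTx.  MSGINT_MODE is programmed to match, and the
 * rx/tx queue counts are collapsed to one when MSI-X is unavailable.
 */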
10175 static void tg3_ints_init(struct tg3 *tp)
10176 {
10177         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10178             !tg3_flag(tp, TAGGED_STATUS)) {
10179                 /* All MSI supporting chips should support tagged
10180                  * status.  Warn and fall back to INTx otherwise.
10181                  */
10182                 netdev_warn(tp->dev,
10183                             "MSI without TAGGED_STATUS? Not using MSI\n");
10184                 goto defcfg;
10185         }
10186
10187         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10188                 tg3_flag_set(tp, USING_MSIX);
10189         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10190                 tg3_flag_set(tp, USING_MSI);
10191
10192         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10193                 u32 msi_mode = tr32(MSGINT_MODE);
10194                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10195                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10196                 if (!tg3_flag(tp, 1SHOT_MSI))
10197                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10198                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10199         }
10200 defcfg:
10201         if (!tg3_flag(tp, USING_MSIX)) {
10202                 tp->irq_cnt = 1;
10203                 tp->napi[0].irq_vec = tp->pdev->irq;
10204                 netif_set_real_num_tx_queues(tp->dev, 1);
10205                 netif_set_real_num_rx_queues(tp->dev, 1);
10206         }
10207 }
10208
10209 static void tg3_ints_fini(struct tg3 *tp)
10210 {
10211         if (tg3_flag(tp, USING_MSIX))
10212                 pci_disable_msix(tp->pdev);
10213         else if (tg3_flag(tp, USING_MSI))
10214                 pci_disable_msi(tp->pdev);
10215         tg3_flag_clear(tp, USING_MSI);
10216         tg3_flag_clear(tp, USING_MSIX);
10217         tg3_flag_clear(tp, ENABLE_RSS);
10218         tg3_flag_clear(tp, ENABLE_TSS);
10219 }
10220
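/* ndo_open: load any required firmware, power the chip up, set up
 * interrupt vectors and NAPI contexts, allocate the descriptor rings,
 * initialize the hardware, verify MSI delivery when MSI is in use,
 * and finally start the timer and the tx queues.
 */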
10221 static int tg3_open(struct net_device *dev)
10222 {
10223         struct tg3 *tp = netdev_priv(dev);
10224         int i, err;
10225
10226         if (tp->fw_needed) {
10227                 err = tg3_request_firmware(tp);
10228                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10229                         if (err)
10230                                 return err;
10231                 } else if (err) {
10232                         netdev_warn(tp->dev, "TSO capability disabled\n");
10233                         tg3_flag_clear(tp, TSO_CAPABLE);
10234                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10235                         netdev_notice(tp->dev, "TSO capability restored\n");
10236                         tg3_flag_set(tp, TSO_CAPABLE);
10237                 }
10238         }
10239
10240         netif_carrier_off(tp->dev);
10241
10242         err = tg3_power_up(tp);
10243         if (err)
10244                 return err;
10245
10246         tg3_full_lock(tp, 0);
10247
10248         tg3_disable_ints(tp);
10249         tg3_flag_clear(tp, INIT_COMPLETE);
10250
10251         tg3_full_unlock(tp);
10252
10253         /*
10254          * Set up interrupts first so we know how
10255          * many NAPI resources to allocate
10256          */
10257         tg3_ints_init(tp);
10258
10259         tg3_rss_check_indir_tbl(tp);
10260
10261         /* The placement of this call is tied
10262          * to the setup and use of Host TX descriptors.
10263          */
10264         err = tg3_alloc_consistent(tp);
10265         if (err)
10266                 goto err_out1;
10267
10268         tg3_napi_init(tp);
10269
10270         tg3_napi_enable(tp);
10271
10272         for (i = 0; i < tp->irq_cnt; i++) {
10273                 struct tg3_napi *tnapi = &tp->napi[i];
10274                 err = tg3_request_irq(tp, i);
10275                 if (err) {
10276                         for (i--; i >= 0; i--) {
10277                                 tnapi = &tp->napi[i];
10278                                 free_irq(tnapi->irq_vec, tnapi);
10279                         }
10280                         goto err_out2;
10281                 }
10282         }
10283
10284         tg3_full_lock(tp, 0);
10285
10286         err = tg3_init_hw(tp, 1);
10287         if (err) {
10288                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10289                 tg3_free_rings(tp);
10290         }
10291
10292         tg3_full_unlock(tp);
10293
10294         if (err)
10295                 goto err_out3;
10296
10297         if (tg3_flag(tp, USING_MSI)) {
10298                 err = tg3_test_msi(tp);
10299
10300                 if (err) {
10301                         tg3_full_lock(tp, 0);
10302                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10303                         tg3_free_rings(tp);
10304                         tg3_full_unlock(tp);
10305
10306                         goto err_out2;
10307                 }
10308
10309                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10310                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10311
10312                         tw32(PCIE_TRANSACTION_CFG,
10313                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10314                 }
10315         }
10316
10317         tg3_phy_start(tp);
10318
10319         tg3_hwmon_open(tp);
10320
10321         tg3_full_lock(tp, 0);
10322
10323         tg3_timer_start(tp);
10324         tg3_flag_set(tp, INIT_COMPLETE);
10325         tg3_enable_ints(tp);
10326
10327         tg3_full_unlock(tp);
10328
10329         netif_tx_start_all_queues(dev);
10330
10331         /*
10332          * Reset the loopback feature if it was turned on while the device
10333          * was down, to make sure that it is configured properly now.
10334          */
10335         if (dev->features & NETIF_F_LOOPBACK)
10336                 tg3_set_loopback(dev, dev->features);
10337
10338         return 0;
10339
10340 err_out3:
10341         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10342                 struct tg3_napi *tnapi = &tp->napi[i];
10343                 free_irq(tnapi->irq_vec, tnapi);
10344         }
10345
10346 err_out2:
10347         tg3_napi_disable(tp);
10348         tg3_napi_fini(tp);
10349         tg3_free_consistent(tp);
10350
10351 err_out1:
10352         tg3_ints_fini(tp);
10353         tg3_frob_aux_power(tp, false);
10354         pci_set_power_state(tp->pdev, PCI_D3hot);
10355         return err;
10356 }
10357
10358 static int tg3_close(struct net_device *dev)
10359 {
10360         int i;
10361         struct tg3 *tp = netdev_priv(dev);
10362
10363         tg3_napi_disable(tp);
10364         tg3_reset_task_cancel(tp);
10365
10366         netif_tx_stop_all_queues(dev);
10367
10368         tg3_timer_stop(tp);
10369
10370         tg3_hwmon_close(tp);
10371
10372         tg3_phy_stop(tp);
10373
10374         tg3_full_lock(tp, 1);
10375
10376         tg3_disable_ints(tp);
10377
10378         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10379         tg3_free_rings(tp);
10380         tg3_flag_clear(tp, INIT_COMPLETE);
10381
10382         tg3_full_unlock(tp);
10383
10384         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10385                 struct tg3_napi *tnapi = &tp->napi[i];
10386                 free_irq(tnapi->irq_vec, tnapi);
10387         }
10388
10389         tg3_ints_fini(tp);
10390
10391         /* Clear stats across close / open calls */
10392         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10393         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10394
10395         tg3_napi_fini(tp);
10396
10397         tg3_free_consistent(tp);
10398
10399         tg3_power_down(tp);
10400
10401         netif_carrier_off(tp->dev);
10402
10403         return 0;
10404 }
10405
10406 static inline u64 get_stat64(tg3_stat64_t *val)
10407 {
10408         return ((u64)val->high << 32) | ((u64)val->low);
10409 }
10410
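/* On 5700/5701 copper devices the CRC error count is read from the
 * PHY's test registers and accumulated in software; everything else
 * reports the rx_fcs_errors counter from the hardware statistics
 * block.
 */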
10411 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10412 {
10413         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10414
10415         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10416             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10417              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10418                 u32 val;
10419
10420                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10421                         tg3_writephy(tp, MII_TG3_TEST1,
10422                                      val | MII_TG3_TEST1_CRC_EN);
10423                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10424                 } else
10425                         val = 0;
10426
10427                 tp->phy_crc_errors += val;
10428
10429                 return tp->phy_crc_errors;
10430         }
10431
10432         return get_stat64(&hw_stats->rx_fcs_errors);
10433 }
10434
10435 #define ESTAT_ADD(member) \
10436         estats->member =        old_estats->member + \
10437                                 get_stat64(&hw_stats->member)
10438
10439 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10440 {
10441         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10442         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10443
10444         ESTAT_ADD(rx_octets);
10445         ESTAT_ADD(rx_fragments);
10446         ESTAT_ADD(rx_ucast_packets);
10447         ESTAT_ADD(rx_mcast_packets);
10448         ESTAT_ADD(rx_bcast_packets);
10449         ESTAT_ADD(rx_fcs_errors);
10450         ESTAT_ADD(rx_align_errors);
10451         ESTAT_ADD(rx_xon_pause_rcvd);
10452         ESTAT_ADD(rx_xoff_pause_rcvd);
10453         ESTAT_ADD(rx_mac_ctrl_rcvd);
10454         ESTAT_ADD(rx_xoff_entered);
10455         ESTAT_ADD(rx_frame_too_long_errors);
10456         ESTAT_ADD(rx_jabbers);
10457         ESTAT_ADD(rx_undersize_packets);
10458         ESTAT_ADD(rx_in_length_errors);
10459         ESTAT_ADD(rx_out_length_errors);
10460         ESTAT_ADD(rx_64_or_less_octet_packets);
10461         ESTAT_ADD(rx_65_to_127_octet_packets);
10462         ESTAT_ADD(rx_128_to_255_octet_packets);
10463         ESTAT_ADD(rx_256_to_511_octet_packets);
10464         ESTAT_ADD(rx_512_to_1023_octet_packets);
10465         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10466         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10467         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10468         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10469         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10470
10471         ESTAT_ADD(tx_octets);
10472         ESTAT_ADD(tx_collisions);
10473         ESTAT_ADD(tx_xon_sent);
10474         ESTAT_ADD(tx_xoff_sent);
10475         ESTAT_ADD(tx_flow_control);
10476         ESTAT_ADD(tx_mac_errors);
10477         ESTAT_ADD(tx_single_collisions);
10478         ESTAT_ADD(tx_mult_collisions);
10479         ESTAT_ADD(tx_deferred);
10480         ESTAT_ADD(tx_excessive_collisions);
10481         ESTAT_ADD(tx_late_collisions);
10482         ESTAT_ADD(tx_collide_2times);
10483         ESTAT_ADD(tx_collide_3times);
10484         ESTAT_ADD(tx_collide_4times);
10485         ESTAT_ADD(tx_collide_5times);
10486         ESTAT_ADD(tx_collide_6times);
10487         ESTAT_ADD(tx_collide_7times);
10488         ESTAT_ADD(tx_collide_8times);
10489         ESTAT_ADD(tx_collide_9times);
10490         ESTAT_ADD(tx_collide_10times);
10491         ESTAT_ADD(tx_collide_11times);
10492         ESTAT_ADD(tx_collide_12times);
10493         ESTAT_ADD(tx_collide_13times);
10494         ESTAT_ADD(tx_collide_14times);
10495         ESTAT_ADD(tx_collide_15times);
10496         ESTAT_ADD(tx_ucast_packets);
10497         ESTAT_ADD(tx_mcast_packets);
10498         ESTAT_ADD(tx_bcast_packets);
10499         ESTAT_ADD(tx_carrier_sense_errors);
10500         ESTAT_ADD(tx_discards);
10501         ESTAT_ADD(tx_errors);
10502
10503         ESTAT_ADD(dma_writeq_full);
10504         ESTAT_ADD(dma_write_prioq_full);
10505         ESTAT_ADD(rxbds_empty);
10506         ESTAT_ADD(rx_discards);
10507         ESTAT_ADD(rx_errors);
10508         ESTAT_ADD(rx_threshold_hit);
10509
10510         ESTAT_ADD(dma_readq_full);
10511         ESTAT_ADD(dma_read_prioq_full);
10512         ESTAT_ADD(tx_comp_queue_full);
10513
10514         ESTAT_ADD(ring_set_send_prod_index);
10515         ESTAT_ADD(ring_status_update);
10516         ESTAT_ADD(nic_irqs);
10517         ESTAT_ADD(nic_avoided_irqs);
10518         ESTAT_ADD(nic_tx_threshold_hit);
10519
10520         ESTAT_ADD(mbuf_lwm_thresh_hit);
10521 }
10522
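/* Fill in rtnl_link_stats64 from the hardware statistics block,
 * folding in the totals saved in net_stats_prev so that the counters
 * accumulate across chip resets.
 */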
10523 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10524 {
10525         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10526         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10527
10528         stats->rx_packets = old_stats->rx_packets +
10529                 get_stat64(&hw_stats->rx_ucast_packets) +
10530                 get_stat64(&hw_stats->rx_mcast_packets) +
10531                 get_stat64(&hw_stats->rx_bcast_packets);
10532
10533         stats->tx_packets = old_stats->tx_packets +
10534                 get_stat64(&hw_stats->tx_ucast_packets) +
10535                 get_stat64(&hw_stats->tx_mcast_packets) +
10536                 get_stat64(&hw_stats->tx_bcast_packets);
10537
10538         stats->rx_bytes = old_stats->rx_bytes +
10539                 get_stat64(&hw_stats->rx_octets);
10540         stats->tx_bytes = old_stats->tx_bytes +
10541                 get_stat64(&hw_stats->tx_octets);
10542
10543         stats->rx_errors = old_stats->rx_errors +
10544                 get_stat64(&hw_stats->rx_errors);
10545         stats->tx_errors = old_stats->tx_errors +
10546                 get_stat64(&hw_stats->tx_errors) +
10547                 get_stat64(&hw_stats->tx_mac_errors) +
10548                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10549                 get_stat64(&hw_stats->tx_discards);
10550
10551         stats->multicast = old_stats->multicast +
10552                 get_stat64(&hw_stats->rx_mcast_packets);
10553         stats->collisions = old_stats->collisions +
10554                 get_stat64(&hw_stats->tx_collisions);
10555
10556         stats->rx_length_errors = old_stats->rx_length_errors +
10557                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10558                 get_stat64(&hw_stats->rx_undersize_packets);
10559
10560         stats->rx_over_errors = old_stats->rx_over_errors +
10561                 get_stat64(&hw_stats->rxbds_empty);
10562         stats->rx_frame_errors = old_stats->rx_frame_errors +
10563                 get_stat64(&hw_stats->rx_align_errors);
10564         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10565                 get_stat64(&hw_stats->tx_discards);
10566         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10567                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10568
10569         stats->rx_crc_errors = old_stats->rx_crc_errors +
10570                 tg3_calc_crc_errors(tp);
10571
10572         stats->rx_missed_errors = old_stats->rx_missed_errors +
10573                 get_stat64(&hw_stats->rx_discards);
10574
10575         stats->rx_dropped = tp->rx_dropped;
10576         stats->tx_dropped = tp->tx_dropped;
10577 }
10578
10579 static int tg3_get_regs_len(struct net_device *dev)
10580 {
10581         return TG3_REG_BLK_SIZE;
10582 }
10583
10584 static void tg3_get_regs(struct net_device *dev,
10585                 struct ethtool_regs *regs, void *_p)
10586 {
10587         struct tg3 *tp = netdev_priv(dev);
10588
10589         regs->version = 0;
10590
10591         memset(_p, 0, TG3_REG_BLK_SIZE);
10592
10593         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10594                 return;
10595
10596         tg3_full_lock(tp, 0);
10597
10598         tg3_dump_legacy_regs(tp, (u32 *)_p);
10599
10600         tg3_full_unlock(tp);
10601 }
10602
10603 static int tg3_get_eeprom_len(struct net_device *dev)
10604 {
10605         struct tg3 *tp = netdev_priv(dev);
10606
10607         return tp->nvram_size;
10608 }
10609
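/* NVRAM is read 4 bytes at a time, so an arbitrary (offset, len)
 * request is split into an unaligned head, a run of whole words, and
 * an unaligned tail.
 */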
10610 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10611 {
10612         struct tg3 *tp = netdev_priv(dev);
10613         int ret;
10614         u8  *pd;
10615         u32 i, offset, len, b_offset, b_count;
10616         __be32 val;
10617
10618         if (tg3_flag(tp, NO_NVRAM))
10619                 return -EINVAL;
10620
10621         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10622                 return -EAGAIN;
10623
10624         offset = eeprom->offset;
10625         len = eeprom->len;
10626         eeprom->len = 0;
10627
10628         eeprom->magic = TG3_EEPROM_MAGIC;
10629
10630         if (offset & 3) {
10631                 /* adjustments to start on required 4 byte boundary */
10632                 b_offset = offset & 3;
10633                 b_count = 4 - b_offset;
10634                 if (b_count > len) {
10635                         /* i.e. offset=1 len=2 */
10636                         b_count = len;
10637                 }
10638                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10639                 if (ret)
10640                         return ret;
10641                 memcpy(data, ((char *)&val) + b_offset, b_count);
10642                 len -= b_count;
10643                 offset += b_count;
10644                 eeprom->len += b_count;
10645         }
10646
10647         /* read bytes up to the last 4 byte boundary */
10648         pd = &data[eeprom->len];
10649         for (i = 0; i < (len - (len & 3)); i += 4) {
10650                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10651                 if (ret) {
10652                         eeprom->len += i;
10653                         return ret;
10654                 }
10655                 memcpy(pd + i, &val, 4);
10656         }
10657         eeprom->len += i;
10658
10659         if (len & 3) {
10660                 /* read last bytes not ending on 4 byte boundary */
10661                 pd = &data[eeprom->len];
10662                 b_count = len & 3;
10663                 b_offset = offset + len - b_count;
10664                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10665                 if (ret)
10666                         return ret;
10667                 memcpy(pd, &val, b_count);
10668                 eeprom->len += b_count;
10669         }
10670         return 0;
10671 }
10672
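/* NVRAM writes must be word-aligned as well.  Unaligned edges are
 * handled by reading back the neighboring words and merging them with
 * the user data in a bounce buffer before the block write.
 */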
10673 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10674 {
10675         struct tg3 *tp = netdev_priv(dev);
10676         int ret;
10677         u32 offset, len, b_offset, odd_len;
10678         u8 *buf;
10679         __be32 start, end;
10680
10681         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10682                 return -EAGAIN;
10683
10684         if (tg3_flag(tp, NO_NVRAM) ||
10685             eeprom->magic != TG3_EEPROM_MAGIC)
10686                 return -EINVAL;
10687
10688         offset = eeprom->offset;
10689         len = eeprom->len;
10690
10691         if ((b_offset = (offset & 3))) {
10692                 /* adjustments to start on required 4 byte boundary */
10693                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10694                 if (ret)
10695                         return ret;
10696                 len += b_offset;
10697                 offset &= ~3;
10698                 if (len < 4)
10699                         len = 4;
10700         }
10701
10702         odd_len = 0;
10703         if (len & 3) {
10704                 /* adjustments to end on required 4 byte boundary */
10705                 odd_len = 1;
10706                 len = (len + 3) & ~3;
10707                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10708                 if (ret)
10709                         return ret;
10710         }
10711
10712         buf = data;
10713         if (b_offset || odd_len) {
10714                 buf = kmalloc(len, GFP_KERNEL);
10715                 if (!buf)
10716                         return -ENOMEM;
10717                 if (b_offset)
10718                         memcpy(buf, &start, 4);
10719                 if (odd_len)
10720                         memcpy(buf+len-4, &end, 4);
10721                 memcpy(buf + b_offset, data, eeprom->len);
10722         }
10723
10724         ret = tg3_nvram_write_block(tp, offset, len, buf);
10725
10726         if (buf != data)
10727                 kfree(buf);
10728
10729         return ret;
10730 }
10731
10732 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10733 {
10734         struct tg3 *tp = netdev_priv(dev);
10735
10736         if (tg3_flag(tp, USE_PHYLIB)) {
10737                 struct phy_device *phydev;
10738                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10739                         return -EAGAIN;
10740                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10741                 return phy_ethtool_gset(phydev, cmd);
10742         }
10743
10744         cmd->supported = (SUPPORTED_Autoneg);
10745
10746         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10747                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10748                                    SUPPORTED_1000baseT_Full);
10749
10750         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10751                 cmd->supported |= (SUPPORTED_100baseT_Half |
10752                                   SUPPORTED_100baseT_Full |
10753                                   SUPPORTED_10baseT_Half |
10754                                   SUPPORTED_10baseT_Full |
10755                                   SUPPORTED_TP);
10756                 cmd->port = PORT_TP;
10757         } else {
10758                 cmd->supported |= SUPPORTED_FIBRE;
10759                 cmd->port = PORT_FIBRE;
10760         }
10761
10762         cmd->advertising = tp->link_config.advertising;
10763         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10764                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10765                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10766                                 cmd->advertising |= ADVERTISED_Pause;
10767                         } else {
10768                                 cmd->advertising |= ADVERTISED_Pause |
10769                                                     ADVERTISED_Asym_Pause;
10770                         }
10771                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10772                         cmd->advertising |= ADVERTISED_Asym_Pause;
10773                 }
10774         }
10775         if (netif_running(dev) && netif_carrier_ok(dev)) {
10776                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10777                 cmd->duplex = tp->link_config.active_duplex;
10778                 cmd->lp_advertising = tp->link_config.rmt_adv;
10779                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10780                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10781                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10782                         else
10783                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10784                 }
10785         } else {
10786                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10787                 cmd->duplex = DUPLEX_UNKNOWN;
10788                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10789         }
10790         cmd->phy_address = tp->phy_addr;
10791         cmd->transceiver = XCVR_INTERNAL;
10792         cmd->autoneg = tp->link_config.autoneg;
10793         cmd->maxtxpkt = 0;
10794         cmd->maxrxpkt = 0;
10795         return 0;
10796 }
10797
10798 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10799 {
10800         struct tg3 *tp = netdev_priv(dev);
10801         u32 speed = ethtool_cmd_speed(cmd);
10802
10803         if (tg3_flag(tp, USE_PHYLIB)) {
10804                 struct phy_device *phydev;
10805                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10806                         return -EAGAIN;
10807                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10808                 return phy_ethtool_sset(phydev, cmd);
10809         }
10810
10811         if (cmd->autoneg != AUTONEG_ENABLE &&
10812             cmd->autoneg != AUTONEG_DISABLE)
10813                 return -EINVAL;
10814
10815         if (cmd->autoneg == AUTONEG_DISABLE &&
10816             cmd->duplex != DUPLEX_FULL &&
10817             cmd->duplex != DUPLEX_HALF)
10818                 return -EINVAL;
10819
10820         if (cmd->autoneg == AUTONEG_ENABLE) {
10821                 u32 mask = ADVERTISED_Autoneg |
10822                            ADVERTISED_Pause |
10823                            ADVERTISED_Asym_Pause;
10824
10825                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10826                         mask |= ADVERTISED_1000baseT_Half |
10827                                 ADVERTISED_1000baseT_Full;
10828
10829                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10830                         mask |= ADVERTISED_100baseT_Half |
10831                                 ADVERTISED_100baseT_Full |
10832                                 ADVERTISED_10baseT_Half |
10833                                 ADVERTISED_10baseT_Full |
10834                                 ADVERTISED_TP;
10835                 else
10836                         mask |= ADVERTISED_FIBRE;
10837
10838                 if (cmd->advertising & ~mask)
10839                         return -EINVAL;
10840
10841                 mask &= (ADVERTISED_1000baseT_Half |
10842                          ADVERTISED_1000baseT_Full |
10843                          ADVERTISED_100baseT_Half |
10844                          ADVERTISED_100baseT_Full |
10845                          ADVERTISED_10baseT_Half |
10846                          ADVERTISED_10baseT_Full);
10847
10848                 cmd->advertising &= mask;
10849         } else {
10850                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10851                         if (speed != SPEED_1000)
10852                                 return -EINVAL;
10853
10854                         if (cmd->duplex != DUPLEX_FULL)
10855                                 return -EINVAL;
10856                 } else {
10857                         if (speed != SPEED_100 &&
10858                             speed != SPEED_10)
10859                                 return -EINVAL;
10860                 }
10861         }
10862
10863         tg3_full_lock(tp, 0);
10864
10865         tp->link_config.autoneg = cmd->autoneg;
10866         if (cmd->autoneg == AUTONEG_ENABLE) {
10867                 tp->link_config.advertising = (cmd->advertising |
10868                                               ADVERTISED_Autoneg);
10869                 tp->link_config.speed = SPEED_UNKNOWN;
10870                 tp->link_config.duplex = DUPLEX_UNKNOWN;
10871         } else {
10872                 tp->link_config.advertising = 0;
10873                 tp->link_config.speed = speed;
10874                 tp->link_config.duplex = cmd->duplex;
10875         }
10876
10877         if (netif_running(dev))
10878                 tg3_setup_phy(tp, 1);
10879
10880         tg3_full_unlock(tp);
10881
10882         return 0;
10883 }
10884
10885 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10886 {
10887         struct tg3 *tp = netdev_priv(dev);
10888
10889         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10890         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10891         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10892         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10893 }
10894
10895 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10896 {
10897         struct tg3 *tp = netdev_priv(dev);
10898
10899         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10900                 wol->supported = WAKE_MAGIC;
10901         else
10902                 wol->supported = 0;
10903         wol->wolopts = 0;
10904         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10905                 wol->wolopts = WAKE_MAGIC;
10906         memset(&wol->sopass, 0, sizeof(wol->sopass));
10907 }
10908
10909 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10910 {
10911         struct tg3 *tp = netdev_priv(dev);
10912         struct device *dp = &tp->pdev->dev;
10913
10914         if (wol->wolopts & ~WAKE_MAGIC)
10915                 return -EINVAL;
10916         if ((wol->wolopts & WAKE_MAGIC) &&
10917             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10918                 return -EINVAL;
10919
10920         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10921
10922         spin_lock_bh(&tp->lock);
10923         if (device_may_wakeup(dp))
10924                 tg3_flag_set(tp, WOL_ENABLE);
10925         else
10926                 tg3_flag_clear(tp, WOL_ENABLE);
10927         spin_unlock_bh(&tp->lock);
10928
10929         return 0;
10930 }
10931
10932 static u32 tg3_get_msglevel(struct net_device *dev)
10933 {
10934         struct tg3 *tp = netdev_priv(dev);
10935         return tp->msg_enable;
10936 }
10937
10938 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10939 {
10940         struct tg3 *tp = netdev_priv(dev);
10941         tp->msg_enable = value;
10942 }
10943
10944 static int tg3_nway_reset(struct net_device *dev)
10945 {
10946         struct tg3 *tp = netdev_priv(dev);
10947         int r;
10948
10949         if (!netif_running(dev))
10950                 return -EAGAIN;
10951
10952         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10953                 return -EINVAL;
10954
10955         if (tg3_flag(tp, USE_PHYLIB)) {
10956                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10957                         return -EAGAIN;
10958                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10959         } else {
10960                 u32 bmcr;
10961
10962                 spin_lock_bh(&tp->lock);
10963                 r = -EINVAL;
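                /* The first read is presumably a dummy to flush a stale
                 * value out of the PHY read path; only the result of the
                 * second read is used.
                 */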
10964                 tg3_readphy(tp, MII_BMCR, &bmcr);
10965                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10966                     ((bmcr & BMCR_ANENABLE) ||
10967                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10968                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10969                                                    BMCR_ANENABLE);
10970                         r = 0;
10971                 }
10972                 spin_unlock_bh(&tp->lock);
10973         }
10974
10975         return r;
10976 }
10977
10978 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10979 {
10980         struct tg3 *tp = netdev_priv(dev);
10981
10982         ering->rx_max_pending = tp->rx_std_ring_mask;
10983         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10984                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10985         else
10986                 ering->rx_jumbo_max_pending = 0;
10987
10988         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10989
10990         ering->rx_pending = tp->rx_pending;
10991         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10992                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10993         else
10994                 ering->rx_jumbo_pending = 0;
10995
10996         ering->tx_pending = tp->napi[0].tx_pending;
10997 }
10998
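/* Validate and apply new ring sizes.  The tx ring must leave room for
 * more than MAX_SKB_FRAGS descriptors (3x that on TSO_BUG chips), and
 * resizing requires a full halt and re-init while the device is up.
 */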
10999 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11000 {
11001         struct tg3 *tp = netdev_priv(dev);
11002         int i, irq_sync = 0, err = 0;
11003
11004         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11005             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11006             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11007             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11008             (tg3_flag(tp, TSO_BUG) &&
11009              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11010                 return -EINVAL;
11011
11012         if (netif_running(dev)) {
11013                 tg3_phy_stop(tp);
11014                 tg3_netif_stop(tp);
11015                 irq_sync = 1;
11016         }
11017
11018         tg3_full_lock(tp, irq_sync);
11019
11020         tp->rx_pending = ering->rx_pending;
11021
11022         if (tg3_flag(tp, MAX_RXPEND_64) &&
11023             tp->rx_pending > 63)
11024                 tp->rx_pending = 63;
11025         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11026
11027         for (i = 0; i < tp->irq_max; i++)
11028                 tp->napi[i].tx_pending = ering->tx_pending;
11029
11030         if (netif_running(dev)) {
11031                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11032                 err = tg3_restart_hw(tp, 1);
11033                 if (!err)
11034                         tg3_netif_start(tp);
11035         }
11036
11037         tg3_full_unlock(tp);
11038
11039         if (irq_sync && !err)
11040                 tg3_phy_start(tp);
11041
11042         return err;
11043 }
11044
11045 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11046 {
11047         struct tg3 *tp = netdev_priv(dev);
11048
11049         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11050
11051         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11052                 epause->rx_pause = 1;
11053         else
11054                 epause->rx_pause = 0;
11055
11056         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11057                 epause->tx_pause = 1;
11058         else
11059                 epause->tx_pause = 0;
11060 }
11061
11062 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11063 {
11064         struct tg3 *tp = netdev_priv(dev);
11065         int err = 0;
11066
11067         if (tg3_flag(tp, USE_PHYLIB)) {
11068                 u32 newadv;
11069                 struct phy_device *phydev;
11070
11071                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11072
11073                 if (!(phydev->supported & SUPPORTED_Pause) ||
11074                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11075                      (epause->rx_pause != epause->tx_pause)))
11076                         return -EINVAL;
11077
11078                 tp->link_config.flowctrl = 0;
11079                 if (epause->rx_pause) {
11080                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11081
11082                         if (epause->tx_pause) {
11083                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11084                                 newadv = ADVERTISED_Pause;
11085                         } else
11086                                 newadv = ADVERTISED_Pause |
11087                                          ADVERTISED_Asym_Pause;
11088                 } else if (epause->tx_pause) {
11089                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11090                         newadv = ADVERTISED_Asym_Pause;
11091                 } else
11092                         newadv = 0;
11093
11094                 if (epause->autoneg)
11095                         tg3_flag_set(tp, PAUSE_AUTONEG);
11096                 else
11097                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11098
11099                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11100                         u32 oldadv = phydev->advertising &
11101                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11102                         if (oldadv != newadv) {
11103                                 phydev->advertising &=
11104                                         ~(ADVERTISED_Pause |
11105                                           ADVERTISED_Asym_Pause);
11106                                 phydev->advertising |= newadv;
11107                                 if (phydev->autoneg) {
11108                                         /*
11109                                          * Always renegotiate the link to
11110                                          * inform our link partner of our
11111                                          * flow control settings, even if the
11112                                          * flow control is forced.  Let
11113                                          * tg3_adjust_link() do the final
11114                                          * flow control setup.
11115                                          */
11116                                         return phy_start_aneg(phydev);
11117                                 }
11118                         }
11119
11120                         if (!epause->autoneg)
11121                                 tg3_setup_flow_control(tp, 0, 0);
11122                 } else {
11123                         tp->link_config.advertising &=
11124                                         ~(ADVERTISED_Pause |
11125                                           ADVERTISED_Asym_Pause);
11126                         tp->link_config.advertising |= newadv;
11127                 }
11128         } else {
11129                 int irq_sync = 0;
11130
11131                 if (netif_running(dev)) {
11132                         tg3_netif_stop(tp);
11133                         irq_sync = 1;
11134                 }
11135
11136                 tg3_full_lock(tp, irq_sync);
11137
11138                 if (epause->autoneg)
11139                         tg3_flag_set(tp, PAUSE_AUTONEG);
11140                 else
11141                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11142                 if (epause->rx_pause)
11143                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11144                 else
11145                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11146                 if (epause->tx_pause)
11147                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11148                 else
11149                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11150
11151                 if (netif_running(dev)) {
11152                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11153                         err = tg3_restart_hw(tp, 1);
11154                         if (!err)
11155                                 tg3_netif_start(tp);
11156                 }
11157
11158                 tg3_full_unlock(tp);
11159         }
11160
11161         return err;
11162 }
11163
11164 static int tg3_get_sset_count(struct net_device *dev, int sset)
11165 {
11166         switch (sset) {
11167         case ETH_SS_TEST:
11168                 return TG3_NUM_TEST;
11169         case ETH_SS_STATS:
11170                 return TG3_NUM_STATS;
11171         default:
11172                 return -EOPNOTSUPP;
11173         }
11174 }
11175
11176 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11177                          u32 *rules __always_unused)
11178 {
11179         struct tg3 *tp = netdev_priv(dev);
11180
11181         if (!tg3_flag(tp, SUPPORT_MSIX))
11182                 return -EOPNOTSUPP;
11183
11184         switch (info->cmd) {
11185         case ETHTOOL_GRXRINGS:
11186                 if (netif_running(tp->dev))
11187                         info->data = tp->irq_cnt;
11188                 else {
11189                         info->data = num_online_cpus();
11190                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
11191                                 info->data = TG3_IRQ_MAX_VECS_RSS;
11192                 }
11193
11194                 /* The first interrupt vector only
11195                  * handles link interrupts.
11196                  */
11197                 info->data -= 1;
11198                 return 0;
11199
11200         default:
11201                 return -EOPNOTSUPP;
11202         }
11203 }
11204
11205 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11206 {
11207         u32 size = 0;
11208         struct tg3 *tp = netdev_priv(dev);
11209
11210         if (tg3_flag(tp, SUPPORT_MSIX))
11211                 size = TG3_RSS_INDIR_TBL_SIZE;
11212
11213         return size;
11214 }
11215
11216 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11217 {
11218         struct tg3 *tp = netdev_priv(dev);
11219         int i;
11220
11221         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11222                 indir[i] = tp->rss_ind_tbl[i];
11223
11224         return 0;
11225 }
11226
11227 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11228 {
11229         struct tg3 *tp = netdev_priv(dev);
11230         size_t i;
11231
11232         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11233                 tp->rss_ind_tbl[i] = indir[i];
11234
11235         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11236                 return 0;
11237
11238         /* It is legal to write the indirection
11239          * table while the device is running.
11240          */
11241         tg3_full_lock(tp, 0);
11242         tg3_rss_write_indir_tbl(tp);
11243         tg3_full_unlock(tp);
11244
11245         return 0;
11246 }
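
/* A hypothetical, untested userspace sketch of exercising the two handlers
 * above via the classic ioctl interface (the usual <sys/socket.h>,
 * <sys/ioctl.h>, <net/if.h>, <linux/ethtool.h> and <linux/sockios.h>
 * headers, plus error handling, are omitted; the interface name is assumed):
 */
#if 0
        struct ethtool_rxfh_indir head = { .cmd = ETHTOOL_GRXFHINDIR };
        struct ethtool_rxfh_indir *indir;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);

        /* First call with size == 0 asks the kernel for the table size. */
        ifr.ifr_data = (void *)&head;
        ioctl(fd, SIOCETHTOOL, &ifr);

        /* Second call retrieves the table itself into ring_index[]. */
        indir = calloc(1, sizeof(*indir) + head.size * sizeof(__u32));
        indir->cmd = ETHTOOL_GRXFHINDIR;
        indir->size = head.size;
        ifr.ifr_data = (void *)indir;
        ioctl(fd, SIOCETHTOOL, &ifr);
#endif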
11247
11248 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11249 {
11250         switch (stringset) {
11251         case ETH_SS_STATS:
11252                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11253                 break;
11254         case ETH_SS_TEST:
11255                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11256                 break;
11257         default:
11258                 WARN_ON(1);     /* we need a WARN() */
11259                 break;
11260         }
11261 }
11262
11263 static int tg3_set_phys_id(struct net_device *dev,
11264                             enum ethtool_phys_id_state state)
11265 {
11266         struct tg3 *tp = netdev_priv(dev);
11267
11268         if (!netif_running(tp->dev))
11269                 return -EAGAIN;
11270
11271         switch (state) {
11272         case ETHTOOL_ID_ACTIVE:
11273                 return 1;       /* cycle on/off once per second */
11274
11275         case ETHTOOL_ID_ON:
11276                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11277                      LED_CTRL_1000MBPS_ON |
11278                      LED_CTRL_100MBPS_ON |
11279                      LED_CTRL_10MBPS_ON |
11280                      LED_CTRL_TRAFFIC_OVERRIDE |
11281                      LED_CTRL_TRAFFIC_BLINK |
11282                      LED_CTRL_TRAFFIC_LED);
11283                 break;
11284
11285         case ETHTOOL_ID_OFF:
11286                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11287                      LED_CTRL_TRAFFIC_OVERRIDE);
11288                 break;
11289
11290         case ETHTOOL_ID_INACTIVE:
11291                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11292                 break;
11293         }
11294
11295         return 0;
11296 }
11297
11298 static void tg3_get_ethtool_stats(struct net_device *dev,
11299                                    struct ethtool_stats *estats, u64 *tmp_stats)
11300 {
11301         struct tg3 *tp = netdev_priv(dev);
11302
11303         if (tp->hw_stats)
11304                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11305         else
11306                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11307 }
11308
11309 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11310 {
11311         int i;
11312         __be32 *buf;
11313         u32 offset = 0, len = 0;
11314         u32 magic, val;
11315
11316         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11317                 return NULL;
11318
11319         if (magic == TG3_EEPROM_MAGIC) {
11320                 for (offset = TG3_NVM_DIR_START;
11321                      offset < TG3_NVM_DIR_END;
11322                      offset += TG3_NVM_DIRENT_SIZE) {
11323                         if (tg3_nvram_read(tp, offset, &val))
11324                                 return NULL;
11325
11326                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11327                             TG3_NVM_DIRTYPE_EXTVPD)
11328                                 break;
11329                 }
11330
11331                 if (offset != TG3_NVM_DIR_END) {
11332                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11333                         if (tg3_nvram_read(tp, offset + 4, &offset))
11334                                 return NULL;
11335
11336                         offset = tg3_nvram_logical_addr(tp, offset);
11337                 }
11338         }
11339
11340         if (!offset || !len) {
11341                 offset = TG3_NVM_VPD_OFF;
11342                 len = TG3_NVM_VPD_LEN;
11343         }
11344
11345         buf = kmalloc(len, GFP_KERNEL);
11346         if (buf == NULL)
11347                 return NULL;
11348
11349         if (magic == TG3_EEPROM_MAGIC) {
11350                 for (i = 0; i < len; i += 4) {
11351                         /* The data is in little-endian format in NVRAM.
11352                          * Use the big-endian read routines to preserve
11353                          * the byte order as it exists in NVRAM.
11354                          */
11355                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11356                                 goto error;
11357                 }
11358         } else {
11359                 u8 *ptr;
11360                 ssize_t cnt;
11361                 unsigned int pos = 0;
11362
11363                 ptr = (u8 *)&buf[0];
11364                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11365                         cnt = pci_read_vpd(tp->pdev, pos,
11366                                            len - pos, ptr);
11367                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11368                                 cnt = 0;
11369                         else if (cnt < 0)
11370                                 goto error;
11371                 }
11372                 if (pos != len)
11373                         goto error;
11374         }
11375
11376         *vpdlen = len;
11377
11378         return buf;
11379
11380 error:
11381         kfree(buf);
11382         return NULL;
11383 }
11384
11385 #define NVRAM_TEST_SIZE 0x100
11386 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11387 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11388 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11389 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11390 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11391 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11392 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11393 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11394
11395 static int tg3_test_nvram(struct tg3 *tp)
11396 {
11397         u32 csum, magic, len;
11398         __be32 *buf;
11399         int i, j, k, err = 0, size;
11400
11401         if (tg3_flag(tp, NO_NVRAM))
11402                 return 0;
11403
11404         if (tg3_nvram_read(tp, 0, &magic) != 0)
11405                 return -EIO;
11406
11407         if (magic == TG3_EEPROM_MAGIC)
11408                 size = NVRAM_TEST_SIZE;
11409         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11410                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11411                     TG3_EEPROM_SB_FORMAT_1) {
11412                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11413                         case TG3_EEPROM_SB_REVISION_0:
11414                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11415                                 break;
11416                         case TG3_EEPROM_SB_REVISION_2:
11417                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11418                                 break;
11419                         case TG3_EEPROM_SB_REVISION_3:
11420                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11421                                 break;
11422                         case TG3_EEPROM_SB_REVISION_4:
11423                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11424                                 break;
11425                         case TG3_EEPROM_SB_REVISION_5:
11426                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11427                                 break;
11428                         case TG3_EEPROM_SB_REVISION_6:
11429                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11430                                 break;
11431                         default:
11432                                 return -EIO;
11433                         }
11434                 } else
11435                         return 0;
11436         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11437                 size = NVRAM_SELFBOOT_HW_SIZE;
11438         else
11439                 return -EIO;
11440
11441         buf = kmalloc(size, GFP_KERNEL);
11442         if (buf == NULL)
11443                 return -ENOMEM;
11444
11445         err = -EIO;
11446         for (i = 0, j = 0; i < size; i += 4, j++) {
11447                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11448                 if (err)
11449                         break;
11450         }
11451         if (i < size)
11452                 goto out;
11453
11454         /* Selfboot format */
11455         magic = be32_to_cpu(buf[0]);
11456         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11457             TG3_EEPROM_MAGIC_FW) {
11458                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11459
11460                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11461                     TG3_EEPROM_SB_REVISION_2) {
11462                         /* For rev 2, the csum doesn't include the MBA. */
11463                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11464                                 csum8 += buf8[i];
11465                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11466                                 csum8 += buf8[i];
11467                 } else {
11468                         for (i = 0; i < size; i++)
11469                                 csum8 += buf8[i];
11470                 }
11471
11472                 if (csum8 == 0) {
11473                         err = 0;
11474                         goto out;
11475                 }
11476
11477                 err = -EIO;
11478                 goto out;
11479         }
11480
11481         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11482             TG3_EEPROM_MAGIC_HW) {
11483                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11484                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11485                 u8 *buf8 = (u8 *) buf;
11486
11487                 /* Separate the parity bits and the data bytes.  */
11488                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11489                         if ((i == 0) || (i == 8)) {
11490                                 int l;
11491                                 u8 msk;
11492
11493                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11494                                         parity[k++] = buf8[i] & msk;
11495                                 i++;
11496                         } else if (i == 16) {
11497                                 int l;
11498                                 u8 msk;
11499
11500                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11501                                         parity[k++] = buf8[i] & msk;
11502                                 i++;
11503
11504                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11505                                         parity[k++] = buf8[i] & msk;
11506                                 i++;
11507                         }
11508                         data[j++] = buf8[i];
11509                 }
11510
11511                 err = -EIO;
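                /* The check below enforces odd parity: hweight8() counts
                 * the set bits in each data byte, and the stored parity
                 * bit must make the combined population count odd.  A
                 * byte of odd weight therefore needs a clear parity bit,
                 * and a byte of even weight needs it set.
                 */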
11512                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11513                         u8 hw8 = hweight8(data[i]);
11514
11515                         if ((hw8 & 0x1) && parity[i])
11516                                 goto out;
11517                         else if (!(hw8 & 0x1) && !parity[i])
11518                                 goto out;
11519                 }
11520                 err = 0;
11521                 goto out;
11522         }
11523
11524         err = -EIO;
11525
11526         /* Bootstrap checksum at offset 0x10 */
11527         csum = calc_crc((unsigned char *) buf, 0x10);
11528         if (csum != le32_to_cpu(buf[0x10/4]))
11529                 goto out;
11530
11531         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11532         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11533         if (csum != le32_to_cpu(buf[0xfc/4]))
11534                 goto out;
11535
11536         kfree(buf);
11537
11538         buf = tg3_vpd_readblock(tp, &len);
11539         if (!buf)
11540                 return -ENOMEM;
11541
11542         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11543         if (i > 0) {
11544                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11545                 if (j < 0)
11546                         goto out;
11547
11548                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11549                         goto out;
11550
11551                 i += PCI_VPD_LRDT_TAG_SIZE;
11552                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11553                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11554                 if (j > 0) {
11555                         u8 csum8 = 0;
11556
11557                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11558
11559                         for (i = 0; i <= j; i++)
11560                                 csum8 += ((u8 *)buf)[i];
11561
11562                         if (csum8)
11563                                 goto out;
11564                 }
11565         }
11566
11567         err = 0;
11568
11569 out:
11570         kfree(buf);
11571         return err;
11572 }
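
/* For reference, the selfboot images verified above use a plain additive
 * checksum: every byte of the image summed modulo 256 must come to zero.
 * A minimal sketch (hypothetical helper, not used by the driver) of how
 * an image-generation tool could derive the stored checksum byte:
 */
#if 0
static u8 selfboot_csum8(const u8 *body, size_t body_len)
{
        u8 sum = 0;
        size_t i;

        /* Sum the image body (the checksum slot itself excluded)... */
        for (i = 0; i < body_len; i++)
                sum += body[i];

        /* ...and store the two's complement so the full image sums to 0. */
        return (u8)(0 - sum);
}
#endif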
11573
11574 #define TG3_SERDES_TIMEOUT_SEC  2
11575 #define TG3_COPPER_TIMEOUT_SEC  6
11576
11577 static int tg3_test_link(struct tg3 *tp)
11578 {
11579         int i, max;
11580
11581         if (!netif_running(tp->dev))
11582                 return -ENODEV;
11583
11584         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11585                 max = TG3_SERDES_TIMEOUT_SEC;
11586         else
11587                 max = TG3_COPPER_TIMEOUT_SEC;
11588
11589         for (i = 0; i < max; i++) {
11590                 if (netif_carrier_ok(tp->dev))
11591                         return 0;
11592
11593                 if (msleep_interruptible(1000))
11594                         break;
11595         }
11596
11597         return -EIO;
11598 }
11599
11600 /* Only test the commonly used registers */
11601 static int tg3_test_registers(struct tg3 *tp)
11602 {
11603         int i, is_5705, is_5750;
11604         u32 offset, read_mask, write_mask, val, save_val, read_val;
11605         static struct {
11606                 u16 offset;
11607                 u16 flags;
11608 #define TG3_FL_5705     0x1
11609 #define TG3_FL_NOT_5705 0x2
11610 #define TG3_FL_NOT_5788 0x4
11611 #define TG3_FL_NOT_5750 0x8
11612                 u32 read_mask;
11613                 u32 write_mask;
11614         } reg_tbl[] = {
11615                 /* MAC Control Registers */
11616                 { MAC_MODE, TG3_FL_NOT_5705,
11617                         0x00000000, 0x00ef6f8c },
11618                 { MAC_MODE, TG3_FL_5705,
11619                         0x00000000, 0x01ef6b8c },
11620                 { MAC_STATUS, TG3_FL_NOT_5705,
11621                         0x03800107, 0x00000000 },
11622                 { MAC_STATUS, TG3_FL_5705,
11623                         0x03800100, 0x00000000 },
11624                 { MAC_ADDR_0_HIGH, 0x0000,
11625                         0x00000000, 0x0000ffff },
11626                 { MAC_ADDR_0_LOW, 0x0000,
11627                         0x00000000, 0xffffffff },
11628                 { MAC_RX_MTU_SIZE, 0x0000,
11629                         0x00000000, 0x0000ffff },
11630                 { MAC_TX_MODE, 0x0000,
11631                         0x00000000, 0x00000070 },
11632                 { MAC_TX_LENGTHS, 0x0000,
11633                         0x00000000, 0x00003fff },
11634                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11635                         0x00000000, 0x000007fc },
11636                 { MAC_RX_MODE, TG3_FL_5705,
11637                         0x00000000, 0x000007dc },
11638                 { MAC_HASH_REG_0, 0x0000,
11639                         0x00000000, 0xffffffff },
11640                 { MAC_HASH_REG_1, 0x0000,
11641                         0x00000000, 0xffffffff },
11642                 { MAC_HASH_REG_2, 0x0000,
11643                         0x00000000, 0xffffffff },
11644                 { MAC_HASH_REG_3, 0x0000,
11645                         0x00000000, 0xffffffff },
11646
11647                 /* Receive Data and Receive BD Initiator Control Registers. */
11648                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11649                         0x00000000, 0xffffffff },
11650                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11651                         0x00000000, 0xffffffff },
11652                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11653                         0x00000000, 0x00000003 },
11654                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11655                         0x00000000, 0xffffffff },
11656                 { RCVDBDI_STD_BD+0, 0x0000,
11657                         0x00000000, 0xffffffff },
11658                 { RCVDBDI_STD_BD+4, 0x0000,
11659                         0x00000000, 0xffffffff },
11660                 { RCVDBDI_STD_BD+8, 0x0000,
11661                         0x00000000, 0xffff0002 },
11662                 { RCVDBDI_STD_BD+0xc, 0x0000,
11663                         0x00000000, 0xffffffff },
11664
11665                 /* Receive BD Initiator Control Registers. */
11666                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11667                         0x00000000, 0xffffffff },
11668                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11669                         0x00000000, 0x000003ff },
11670                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11671                         0x00000000, 0xffffffff },
11672
11673                 /* Host Coalescing Control Registers. */
11674                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11675                         0x00000000, 0x00000004 },
11676                 { HOSTCC_MODE, TG3_FL_5705,
11677                         0x00000000, 0x000000f6 },
11678                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11679                         0x00000000, 0xffffffff },
11680                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11681                         0x00000000, 0x000003ff },
11682                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11683                         0x00000000, 0xffffffff },
11684                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11685                         0x00000000, 0x000003ff },
11686                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11687                         0x00000000, 0xffffffff },
11688                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11689                         0x00000000, 0x000000ff },
11690                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11691                         0x00000000, 0xffffffff },
11692                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11693                         0x00000000, 0x000000ff },
11694                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11695                         0x00000000, 0xffffffff },
11696                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11697                         0x00000000, 0xffffffff },
11698                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11699                         0x00000000, 0xffffffff },
11700                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11701                         0x00000000, 0x000000ff },
11702                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11703                         0x00000000, 0xffffffff },
11704                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11705                         0x00000000, 0x000000ff },
11706                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11707                         0x00000000, 0xffffffff },
11708                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11709                         0x00000000, 0xffffffff },
11710                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11711                         0x00000000, 0xffffffff },
11712                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11713                         0x00000000, 0xffffffff },
11714                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11715                         0x00000000, 0xffffffff },
11716                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11717                         0xffffffff, 0x00000000 },
11718                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11719                         0xffffffff, 0x00000000 },
11720
11721                 /* Buffer Manager Control Registers. */
11722                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11723                         0x00000000, 0x007fff80 },
11724                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11725                         0x00000000, 0x007fffff },
11726                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11727                         0x00000000, 0x0000003f },
11728                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11729                         0x00000000, 0x000001ff },
11730                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11731                         0x00000000, 0x000001ff },
11732                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11733                         0xffffffff, 0x00000000 },
11734                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11735                         0xffffffff, 0x00000000 },
11736
11737                 /* Mailbox Registers */
11738                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11739                         0x00000000, 0x000001ff },
11740                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11741                         0x00000000, 0x000001ff },
11742                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11743                         0x00000000, 0x000007ff },
11744                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11745                         0x00000000, 0x000001ff },
11746
11747                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11748         };
11749
11750         is_5705 = is_5750 = 0;
11751         if (tg3_flag(tp, 5705_PLUS)) {
11752                 is_5705 = 1;
11753                 if (tg3_flag(tp, 5750_PLUS))
11754                         is_5750 = 1;
11755         }
11756
11757         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11758                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11759                         continue;
11760
11761                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11762                         continue;
11763
11764                 if (tg3_flag(tp, IS_5788) &&
11765                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11766                         continue;
11767
11768                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11769                         continue;
11770
11771                 offset = (u32) reg_tbl[i].offset;
11772                 read_mask = reg_tbl[i].read_mask;
11773                 write_mask = reg_tbl[i].write_mask;
11774
11775                 /* Save the original register content */
11776                 save_val = tr32(offset);
11777
11778                 /* Determine the read-only value. */
11779                 read_val = save_val & read_mask;
11780
11781                 /* Write zero to the register, then make sure the read-only bits
11782                  * are not changed and the read/write bits are all zeros.
11783                  */
11784                 tw32(offset, 0);
11785
11786                 val = tr32(offset);
11787
11788                 /* Test the read-only and read/write bits. */
11789                 if (((val & read_mask) != read_val) || (val & write_mask))
11790                         goto out;
11791
11792                 /* Write ones to all the bits defined by RdMask and WrMask, then
11793                  * make sure the read-only bits are not changed and the
11794                  * read/write bits are all ones.
11795                  */
11796                 tw32(offset, read_mask | write_mask);
11797
11798                 val = tr32(offset);
11799
11800                 /* Test the read-only bits. */
11801                 if ((val & read_mask) != read_val)
11802                         goto out;
11803
11804                 /* Test the read/write bits. */
11805                 if ((val & write_mask) != write_mask)
11806                         goto out;
11807
11808                 tw32(offset, save_val);
11809         }
11810
11811         return 0;
11812
11813 out:
11814         if (netif_msg_hw(tp))
11815                 netdev_err(tp->dev,
11816                            "Register test failed at offset %x\n", offset);
11817         tw32(offset, save_val);
11818         return -EIO;
11819 }
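
/* Worked example of the mask scheme above: MAC_MODE on a 5705-class chip
 * carries read_mask 0x00000000 and write_mask 0x01ef6b8c, so writing zero
 * must read back zero in every write_mask bit, and writing 0x01ef6b8c must
 * read back exactly those bits set.  MAC_STATUS is the opposite case: its
 * read_mask bits (0x03800100 on 5705) are expected to keep their original
 * value regardless of what the test writes.
 */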
11820
11821 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11822 {
11823         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11824         int i;
11825         u32 j;
11826
11827         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11828                 for (j = 0; j < len; j += 4) {
11829                         u32 val;
11830
11831                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11832                         tg3_read_mem(tp, offset + j, &val);
11833                         if (val != test_pattern[i])
11834                                 return -EIO;
11835                 }
11836         }
11837         return 0;
11838 }
11839
11840 static int tg3_test_memory(struct tg3 *tp)
11841 {
11842         static struct mem_entry {
11843                 u32 offset;
11844                 u32 len;
11845         } mem_tbl_570x[] = {
11846                 { 0x00000000, 0x00b50},
11847                 { 0x00002000, 0x1c000},
11848                 { 0xffffffff, 0x00000}
11849         }, mem_tbl_5705[] = {
11850                 { 0x00000100, 0x0000c},
11851                 { 0x00000200, 0x00008},
11852                 { 0x00004000, 0x00800},
11853                 { 0x00006000, 0x01000},
11854                 { 0x00008000, 0x02000},
11855                 { 0x00010000, 0x0e000},
11856                 { 0xffffffff, 0x00000}
11857         }, mem_tbl_5755[] = {
11858                 { 0x00000200, 0x00008},
11859                 { 0x00004000, 0x00800},
11860                 { 0x00006000, 0x00800},
11861                 { 0x00008000, 0x02000},
11862                 { 0x00010000, 0x0c000},
11863                 { 0xffffffff, 0x00000}
11864         }, mem_tbl_5906[] = {
11865                 { 0x00000200, 0x00008},
11866                 { 0x00004000, 0x00400},
11867                 { 0x00006000, 0x00400},
11868                 { 0x00008000, 0x01000},
11869                 { 0x00010000, 0x01000},
11870                 { 0xffffffff, 0x00000}
11871         }, mem_tbl_5717[] = {
11872                 { 0x00000200, 0x00008},
11873                 { 0x00010000, 0x0a000},
11874                 { 0x00020000, 0x13c00},
11875                 { 0xffffffff, 0x00000}
11876         }, mem_tbl_57765[] = {
11877                 { 0x00000200, 0x00008},
11878                 { 0x00004000, 0x00800},
11879                 { 0x00006000, 0x09800},
11880                 { 0x00010000, 0x0a000},
11881                 { 0xffffffff, 0x00000}
11882         };
11883         struct mem_entry *mem_tbl;
11884         int err = 0;
11885         int i;
11886
11887         if (tg3_flag(tp, 5717_PLUS))
11888                 mem_tbl = mem_tbl_5717;
11889         else if (tg3_flag(tp, 57765_CLASS))
11890                 mem_tbl = mem_tbl_57765;
11891         else if (tg3_flag(tp, 5755_PLUS))
11892                 mem_tbl = mem_tbl_5755;
11893         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11894                 mem_tbl = mem_tbl_5906;
11895         else if (tg3_flag(tp, 5705_PLUS))
11896                 mem_tbl = mem_tbl_5705;
11897         else
11898                 mem_tbl = mem_tbl_570x;
11899
11900         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11901                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11902                 if (err)
11903                         break;
11904         }
11905
11906         return err;
11907 }
11908
11909 #define TG3_TSO_MSS             500
11910
11911 #define TG3_TSO_IP_HDR_LEN      20
11912 #define TG3_TSO_TCP_HDR_LEN     20
11913 #define TG3_TSO_TCP_OPT_LEN     12
11914
11915 static const u8 tg3_tso_header[] = {
11916 0x08, 0x00,
11917 0x45, 0x00, 0x00, 0x00,
11918 0x00, 0x00, 0x40, 0x00,
11919 0x40, 0x06, 0x00, 0x00,
11920 0x0a, 0x00, 0x00, 0x01,
11921 0x0a, 0x00, 0x00, 0x02,
11922 0x0d, 0x00, 0xe0, 0x00,
11923 0x00, 0x00, 0x01, 0x00,
11924 0x00, 0x00, 0x02, 0x00,
11925 0x80, 0x10, 0x10, 0x00,
11926 0x14, 0x09, 0x00, 0x00,
11927 0x01, 0x01, 0x08, 0x0a,
11928 0x11, 0x11, 0x11, 0x11,
11929 0x11, 0x11, 0x11, 0x11,
11930 };
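
/* Decoded, the canned header above is: the IPv4 ethertype (0x0800); a
 * 20-byte IPv4 header (IHL 5, DF set, TTL 64, protocol TCP, 10.0.0.1 ->
 * 10.0.0.2, with tot_len and the IP checksum left at zero for the test
 * code to handle); and a 32-byte TCP header (data offset 8, ACK set)
 * whose last 12 bytes are NOP, NOP and timestamp options -- matching the
 * TG3_TSO_*_HDR_LEN/OPT_LEN constants above.
 */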
11931
11932 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11933 {
11934         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11935         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11936         u32 budget;
11937         struct sk_buff *skb;
11938         u8 *tx_data, *rx_data;
11939         dma_addr_t map;
11940         int num_pkts, tx_len, rx_len, i, err;
11941         struct tg3_rx_buffer_desc *desc;
11942         struct tg3_napi *tnapi, *rnapi;
11943         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11944
11945         tnapi = &tp->napi[0];
11946         rnapi = &tp->napi[0];
11947         if (tp->irq_cnt > 1) {
11948                 if (tg3_flag(tp, ENABLE_RSS))
11949                         rnapi = &tp->napi[1];
11950                 if (tg3_flag(tp, ENABLE_TSS))
11951                         tnapi = &tp->napi[1];
11952         }
11953         coal_now = tnapi->coal_now | rnapi->coal_now;
11954
11955         err = -EIO;
11956
11957         tx_len = pktsz;
11958         skb = netdev_alloc_skb(tp->dev, tx_len);
11959         if (!skb)
11960                 return -ENOMEM;
11961
11962         tx_data = skb_put(skb, tx_len);
11963         memcpy(tx_data, tp->dev->dev_addr, 6);
11964         memset(tx_data + 6, 0x0, 8);
11965
11966         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11967
11968         if (tso_loopback) {
11969                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11970
11971                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11972                               TG3_TSO_TCP_OPT_LEN;
11973
11974                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11975                        sizeof(tg3_tso_header));
11976                 mss = TG3_TSO_MSS;
11977
11978                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11979                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
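                /* Worked example: the TSO test passes ETH_FRAME_LEN (1514)
                 * here, so the payload is 1514 - 12 (MAC addresses) - 54
                 * (canned header) = 1448 bytes, and DIV_ROUND_UP(1448, 500)
                 * = 3 segments are expected back on the rx ring.
                 */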
11980
11981                 /* Set the total length field in the IP header */
11982                 iph->tot_len = htons((u16)(mss + hdr_len));
11983
11984                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11985                               TXD_FLAG_CPU_POST_DMA);
11986
11987                 if (tg3_flag(tp, HW_TSO_1) ||
11988                     tg3_flag(tp, HW_TSO_2) ||
11989                     tg3_flag(tp, HW_TSO_3)) {
11990                         struct tcphdr *th;
11991                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11992                         th = (struct tcphdr *)&tx_data[val];
11993                         th->check = 0;
11994                 } else
11995                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11996
11997                 if (tg3_flag(tp, HW_TSO_3)) {
11998                         mss |= (hdr_len & 0xc) << 12;
11999                         if (hdr_len & 0x10)
12000                                 base_flags |= 0x00000010;
12001                         base_flags |= (hdr_len & 0x3e0) << 5;
12002                 } else if (tg3_flag(tp, HW_TSO_2))
12003                         mss |= hdr_len << 9;
12004                 else if (tg3_flag(tp, HW_TSO_1) ||
12005                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12006                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12007                 } else {
12008                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12009                 }
12010
12011                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12012         } else {
12013                 num_pkts = 1;
12014                 data_off = ETH_HLEN;
12015
12016                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12017                     tx_len > VLAN_ETH_FRAME_LEN)
12018                         base_flags |= TXD_FLAG_JMB_PKT;
12019         }
12020
12021         for (i = data_off; i < tx_len; i++)
12022                 tx_data[i] = (u8) (i & 0xff);
12023
12024         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12025         if (pci_dma_mapping_error(tp->pdev, map)) {
12026                 dev_kfree_skb(skb);
12027                 return -EIO;
12028         }
12029
12030         val = tnapi->tx_prod;
12031         tnapi->tx_buffers[val].skb = skb;
12032         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12033
12034         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12035                rnapi->coal_now);
12036
12037         udelay(10);
12038
12039         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12040
12041         budget = tg3_tx_avail(tnapi);
12042         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12043                             base_flags | TXD_FLAG_END, mss, 0)) {
12044                 tnapi->tx_buffers[val].skb = NULL;
12045                 dev_kfree_skb(skb);
12046                 return -EIO;
12047         }
12048
12049         tnapi->tx_prod++;
12050
12051         /* Sync BD data before updating mailbox */
12052         wmb();
12053
12054         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12055         tr32_mailbox(tnapi->prodmbox);
12056
12057         udelay(10);
12058
12059         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12060         for (i = 0; i < 35; i++) {
12061                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12062                        coal_now);
12063
12064                 udelay(10);
12065
12066                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12067                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12068                 if ((tx_idx == tnapi->tx_prod) &&
12069                     (rx_idx == (rx_start_idx + num_pkts)))
12070                         break;
12071         }
12072
12073         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12074         dev_kfree_skb(skb);
12075
12076         if (tx_idx != tnapi->tx_prod)
12077                 goto out;
12078
12079         if (rx_idx != rx_start_idx + num_pkts)
12080                 goto out;
12081
12082         val = data_off;
12083         while (rx_idx != rx_start_idx) {
12084                 desc = &rnapi->rx_rcb[rx_start_idx++];
12085                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12086                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12087
12088                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12089                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12090                         goto out;
12091
12092                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12093                          - ETH_FCS_LEN;
12094
12095                 if (!tso_loopback) {
12096                         if (rx_len != tx_len)
12097                                 goto out;
12098
12099                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12100                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12101                                         goto out;
12102                         } else {
12103                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12104                                         goto out;
12105                         }
12106                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12107                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12108                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12109                         goto out;
12110                 }
12111
12112                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12113                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12114                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12115                                              mapping);
12116                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12117                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12118                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12119                                              mapping);
12120                 } else
12121                         goto out;
12122
12123                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12124                                             PCI_DMA_FROMDEVICE);
12125
12126                 rx_data += TG3_RX_OFFSET(tp);
12127                 for (i = data_off; i < rx_len; i++, val++) {
12128                         if (*(rx_data + i) != (u8) (val & 0xff))
12129                                 goto out;
12130                 }
12131         }
12132
12133         err = 0;
12134
12135         /* tg3_free_rings will unmap and free the rx_data */
12136 out:
12137         return err;
12138 }
12139
12140 #define TG3_STD_LOOPBACK_FAILED         1
12141 #define TG3_JMB_LOOPBACK_FAILED         2
12142 #define TG3_TSO_LOOPBACK_FAILED         4
12143 #define TG3_LOOPBACK_FAILED \
12144         (TG3_STD_LOOPBACK_FAILED | \
12145          TG3_JMB_LOOPBACK_FAILED | \
12146          TG3_TSO_LOOPBACK_FAILED)
12147
12148 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12149 {
12150         int err = -EIO;
12151         u32 eee_cap;
12152         u32 jmb_pkt_sz = 9000;
12153
12154         if (tp->dma_limit)
12155                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12156
12157         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12158         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12159
12160         if (!netif_running(tp->dev)) {
12161                 data[0] = TG3_LOOPBACK_FAILED;
12162                 data[1] = TG3_LOOPBACK_FAILED;
12163                 if (do_extlpbk)
12164                         data[2] = TG3_LOOPBACK_FAILED;
12165                 goto done;
12166         }
12167
12168         err = tg3_reset_hw(tp, 1);
12169         if (err) {
12170                 data[0] = TG3_LOOPBACK_FAILED;
12171                 data[1] = TG3_LOOPBACK_FAILED;
12172                 if (do_extlpbk)
12173                         data[2] = TG3_LOOPBACK_FAILED;
12174                 goto done;
12175         }
12176
12177         if (tg3_flag(tp, ENABLE_RSS)) {
12178                 int i;
12179
12180                 /* Reroute all rx packets to the 1st queue */
12181                 for (i = MAC_RSS_INDIR_TBL_0;
12182                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12183                         tw32(i, 0x0);
12184         }
12185
12186         /* HW errata - mac loopback fails in some cases on 5780.
12187          * Normal traffic and PHY loopback are not affected by
12188          * errata.  Also, the MAC loopback test is deprecated for
12189          * all newer ASIC revisions.
12190          */
12191         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12192             !tg3_flag(tp, CPMU_PRESENT)) {
12193                 tg3_mac_loopback(tp, true);
12194
12195                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12196                         data[0] |= TG3_STD_LOOPBACK_FAILED;
12197
12198                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12199                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12200                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
12201
12202                 tg3_mac_loopback(tp, false);
12203         }
12204
12205         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12206             !tg3_flag(tp, USE_PHYLIB)) {
12207                 int i;
12208
12209                 tg3_phy_lpbk_set(tp, 0, false);
12210
12211                 /* Wait for link */
12212                 for (i = 0; i < 100; i++) {
12213                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12214                                 break;
12215                         mdelay(1);
12216                 }
12217
12218                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12219                         data[1] |= TG3_STD_LOOPBACK_FAILED;
12220                 if (tg3_flag(tp, TSO_CAPABLE) &&
12221                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12222                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
12223                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12224                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12225                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
12226
12227                 if (do_extlpbk) {
12228                         tg3_phy_lpbk_set(tp, 0, true);
12229
12230                         /* All link indications report up, but the hardware
12231                          * isn't really ready for about 20 msec.  Double it
12232                          * to be sure.
12233                          */
12234                         mdelay(40);
12235
12236                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12237                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
12238                         if (tg3_flag(tp, TSO_CAPABLE) &&
12239                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12240                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12241                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12242                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12243                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12244                 }
12245
12246                 /* Re-enable gphy autopowerdown. */
12247                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12248                         tg3_phy_toggle_apd(tp, true);
12249         }
12250
12251         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12252
12253 done:
12254         tp->phy_flags |= eee_cap;
12255
12256         return err;
12257 }
12258
12259 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12260                           u64 *data)
12261 {
12262         struct tg3 *tp = netdev_priv(dev);
12263         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12264
12265         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12266             tg3_power_up(tp)) {
12267                 etest->flags |= ETH_TEST_FL_FAILED;
12268                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12269                 return;
12270         }
12271
12272         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12273
12274         if (tg3_test_nvram(tp) != 0) {
12275                 etest->flags |= ETH_TEST_FL_FAILED;
12276                 data[0] = 1;
12277         }
12278         if (!doextlpbk && tg3_test_link(tp)) {
12279                 etest->flags |= ETH_TEST_FL_FAILED;
12280                 data[1] = 1;
12281         }
12282         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12283                 int err, err2 = 0, irq_sync = 0;
12284
12285                 if (netif_running(dev)) {
12286                         tg3_phy_stop(tp);
12287                         tg3_netif_stop(tp);
12288                         irq_sync = 1;
12289                 }
12290
12291                 tg3_full_lock(tp, irq_sync);
12292
12293                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12294                 err = tg3_nvram_lock(tp);
12295                 tg3_halt_cpu(tp, RX_CPU_BASE);
12296                 if (!tg3_flag(tp, 5705_PLUS))
12297                         tg3_halt_cpu(tp, TX_CPU_BASE);
12298                 if (!err)
12299                         tg3_nvram_unlock(tp);
12300
12301                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12302                         tg3_phy_reset(tp);
12303
12304                 if (tg3_test_registers(tp) != 0) {
12305                         etest->flags |= ETH_TEST_FL_FAILED;
12306                         data[2] = 1;
12307                 }
12308
12309                 if (tg3_test_memory(tp) != 0) {
12310                         etest->flags |= ETH_TEST_FL_FAILED;
12311                         data[3] = 1;
12312                 }
12313
12314                 if (doextlpbk)
12315                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12316
12317                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12318                         etest->flags |= ETH_TEST_FL_FAILED;
12319
12320                 tg3_full_unlock(tp);
12321
12322                 if (tg3_test_interrupt(tp) != 0) {
12323                         etest->flags |= ETH_TEST_FL_FAILED;
12324                         data[7] = 1;
12325                 }
12326
12327                 tg3_full_lock(tp, 0);
12328
12329                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12330                 if (netif_running(dev)) {
12331                         tg3_flag_set(tp, INIT_COMPLETE);
12332                         err2 = tg3_restart_hw(tp, 1);
12333                         if (!err2)
12334                                 tg3_netif_start(tp);
12335                 }
12336
12337                 tg3_full_unlock(tp);
12338
12339                 if (irq_sync && !err2)
12340                         tg3_phy_start(tp);
12341         }
12342         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12343                 tg3_power_down(tp);
12344
12345 }
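
/* For reference, the offline path above reports results in the order of
 * this driver's ethtool test strings: data[0] nvram, data[1] link,
 * data[2] registers, data[3] memory, data[4]-data[6] the mac, phy and
 * external loopbacks, and data[7] interrupt.  Userspace reaches it with
 * e.g. "ethtool -t eth0 offline" (interface name assumed).
 */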
12346
12347 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12348 {
12349         struct mii_ioctl_data *data = if_mii(ifr);
12350         struct tg3 *tp = netdev_priv(dev);
12351         int err;
12352
12353         if (tg3_flag(tp, USE_PHYLIB)) {
12354                 struct phy_device *phydev;
12355                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12356                         return -EAGAIN;
12357                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12358                 return phy_mii_ioctl(phydev, ifr, cmd);
12359         }
12360
12361         switch (cmd) {
12362         case SIOCGMIIPHY:
12363                 data->phy_id = tp->phy_addr;
12364
12365                 /* fallthru */
12366         case SIOCGMIIREG: {
12367                 u32 mii_regval;
12368
12369                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12370                         break;                  /* We have no PHY */
12371
12372                 if (!netif_running(dev))
12373                         return -EAGAIN;
12374
12375                 spin_lock_bh(&tp->lock);
12376                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12377                 spin_unlock_bh(&tp->lock);
12378
12379                 data->val_out = mii_regval;
12380
12381                 return err;
12382         }
12383
12384         case SIOCSMIIREG:
12385                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12386                         break;                  /* We have no PHY */
12387
12388                 if (!netif_running(dev))
12389                         return -EAGAIN;
12390
12391                 spin_lock_bh(&tp->lock);
12392                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12393                 spin_unlock_bh(&tp->lock);
12394
12395                 return err;
12396
12397         default:
12398                 /* do nothing */
12399                 break;
12400         }
12401         return -EOPNOTSUPP;
12402 }
12403
12404 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12405 {
12406         struct tg3 *tp = netdev_priv(dev);
12407
12408         memcpy(ec, &tp->coal, sizeof(*ec));
12409         return 0;
12410 }
12411
12412 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12413 {
12414         struct tg3 *tp = netdev_priv(dev);
12415         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12416         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12417
12418         if (!tg3_flag(tp, 5705_PLUS)) {
12419                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12420                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12421                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12422                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12423         }
12424
12425         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12426             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12427             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12428             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12429             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12430             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12431             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12432             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12433             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12434             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12435                 return -EINVAL;
12436
12437         /* No rx interrupts will be generated if both are zero */
12438         if ((ec->rx_coalesce_usecs == 0) &&
12439             (ec->rx_max_coalesced_frames == 0))
12440                 return -EINVAL;
12441
12442         /* No tx interrupts will be generated if both are zero */
12443         if ((ec->tx_coalesce_usecs == 0) &&
12444             (ec->tx_max_coalesced_frames == 0))
12445                 return -EINVAL;
12446
12447         /* Only copy relevant parameters, ignore all others. */
12448         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12449         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12450         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12451         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12452         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12453         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12454         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12455         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12456         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12457
12458         if (netif_running(dev)) {
12459                 tg3_full_lock(tp, 0);
12460                 __tg3_set_coalesce(tp, &tp->coal);
12461                 tg3_full_unlock(tp);
12462         }
12463         return 0;
12464 }
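
/* The fields copied above map onto the standard coalescing ioctl, e.g.
 * (illustrative values):
 *
 *      ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 *
 * where rx-usecs/rx-frames land in rx_coalesce_usecs and
 * rx_max_coalesced_frames.  Note that the validation above rejects an rx
 * or tx pair that is zero in both members, since that would disable the
 * corresponding interrupts entirely.
 */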
12465
12466 static const struct ethtool_ops tg3_ethtool_ops = {
12467         .get_settings           = tg3_get_settings,
12468         .set_settings           = tg3_set_settings,
12469         .get_drvinfo            = tg3_get_drvinfo,
12470         .get_regs_len           = tg3_get_regs_len,
12471         .get_regs               = tg3_get_regs,
12472         .get_wol                = tg3_get_wol,
12473         .set_wol                = tg3_set_wol,
12474         .get_msglevel           = tg3_get_msglevel,
12475         .set_msglevel           = tg3_set_msglevel,
12476         .nway_reset             = tg3_nway_reset,
12477         .get_link               = ethtool_op_get_link,
12478         .get_eeprom_len         = tg3_get_eeprom_len,
12479         .get_eeprom             = tg3_get_eeprom,
12480         .set_eeprom             = tg3_set_eeprom,
12481         .get_ringparam          = tg3_get_ringparam,
12482         .set_ringparam          = tg3_set_ringparam,
12483         .get_pauseparam         = tg3_get_pauseparam,
12484         .set_pauseparam         = tg3_set_pauseparam,
12485         .self_test              = tg3_self_test,
12486         .get_strings            = tg3_get_strings,
12487         .set_phys_id            = tg3_set_phys_id,
12488         .get_ethtool_stats      = tg3_get_ethtool_stats,
12489         .get_coalesce           = tg3_get_coalesce,
12490         .set_coalesce           = tg3_set_coalesce,
12491         .get_sset_count         = tg3_get_sset_count,
12492         .get_rxnfc              = tg3_get_rxnfc,
12493         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12494         .get_rxfh_indir         = tg3_get_rxfh_indir,
12495         .set_rxfh_indir         = tg3_set_rxfh_indir,
12496         .get_ts_info            = ethtool_op_get_ts_info,
12497 };
12498
12499 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12500                                                 struct rtnl_link_stats64 *stats)
12501 {
12502         struct tg3 *tp = netdev_priv(dev);
12503
12504         if (!tp->hw_stats)
12505                 return &tp->net_stats_prev;
12506
12507         spin_lock_bh(&tp->lock);
12508         tg3_get_nstats(tp, stats);
12509         spin_unlock_bh(&tp->lock);
12510
12511         return stats;
12512 }
12513
12514 static void tg3_set_rx_mode(struct net_device *dev)
12515 {
12516         struct tg3 *tp = netdev_priv(dev);
12517
12518         if (!netif_running(dev))
12519                 return;
12520
12521         tg3_full_lock(tp, 0);
12522         __tg3_set_rx_mode(dev);
12523         tg3_full_unlock(tp);
12524 }
12525
12526 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12527                                int new_mtu)
12528 {
12529         dev->mtu = new_mtu;
12530
12531         if (new_mtu > ETH_DATA_LEN) {
12532                 if (tg3_flag(tp, 5780_CLASS)) {
12533                         netdev_update_features(dev);
12534                         tg3_flag_clear(tp, TSO_CAPABLE);
12535                 } else {
12536                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12537                 }
12538         } else {
12539                 if (tg3_flag(tp, 5780_CLASS)) {
12540                         tg3_flag_set(tp, TSO_CAPABLE);
12541                         netdev_update_features(dev);
12542                 }
12543                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12544         }
12545 }
12546
12547 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12548 {
12549         struct tg3 *tp = netdev_priv(dev);
12550         int err, reset_phy = 0;
12551
12552         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12553                 return -EINVAL;
12554
12555         if (!netif_running(dev)) {
12556                 /* We'll just catch it later when the
12557                  * device is brought up.
12558                  */
12559                 tg3_set_mtu(dev, tp, new_mtu);
12560                 return 0;
12561         }
12562
12563         tg3_phy_stop(tp);
12564
12565         tg3_netif_stop(tp);
12566
12567         tg3_full_lock(tp, 1);
12568
12569         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12570
12571         tg3_set_mtu(dev, tp, new_mtu);
12572
12573         /* Reset PHY, otherwise the read DMA engine will be left in a mode
12574          * that breaks all requests up into 256-byte chunks.
12575          */
12576         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12577                 reset_phy = 1;
12578
12579         err = tg3_restart_hw(tp, reset_phy);
12580
12581         if (!err)
12582                 tg3_netif_start(tp);
12583
12584         tg3_full_unlock(tp);
12585
12586         if (!err)
12587                 tg3_phy_start(tp);
12588
12589         return err;
12590 }
12591
12592 static const struct net_device_ops tg3_netdev_ops = {
12593         .ndo_open               = tg3_open,
12594         .ndo_stop               = tg3_close,
12595         .ndo_start_xmit         = tg3_start_xmit,
12596         .ndo_get_stats64        = tg3_get_stats64,
12597         .ndo_validate_addr      = eth_validate_addr,
12598         .ndo_set_rx_mode        = tg3_set_rx_mode,
12599         .ndo_set_mac_address    = tg3_set_mac_addr,
12600         .ndo_do_ioctl           = tg3_ioctl,
12601         .ndo_tx_timeout         = tg3_tx_timeout,
12602         .ndo_change_mtu         = tg3_change_mtu,
12603         .ndo_fix_features       = tg3_fix_features,
12604         .ndo_set_features       = tg3_set_features,
12605 #ifdef CONFIG_NET_POLL_CONTROLLER
12606         .ndo_poll_controller    = tg3_poll_controller,
12607 #endif
12608 };
12609
12610 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12611 {
12612         u32 cursize, val, magic;
12613
12614         tp->nvram_size = EEPROM_CHIP_SIZE;
12615
12616         if (tg3_nvram_read(tp, 0, &magic) != 0)
12617                 return;
12618
12619         if ((magic != TG3_EEPROM_MAGIC) &&
12620             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12621             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12622                 return;
12623
12624         /*
12625          * Size the chip by reading offsets at increasing powers of two.
12626          * When we encounter our validation signature, we know the addressing
12627          * has wrapped around, and thus have our chip size.
12628          */
12629         cursize = 0x10;
12630
12631         while (cursize < tp->nvram_size) {
12632                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12633                         return;
12634
12635                 if (val == magic)
12636                         break;
12637
12638                 cursize <<= 1;
12639         }
12640
12641         tp->nvram_size = cursize;
12642 }
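
/* For illustration, a standalone sketch of the wrap-around probe described
 * above, run against a simulated EEPROM whose addresses alias modulo its
 * real size.  Everything here is made up and not driver code; the block is
 * disabled with #if 0:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define SIM_EEPROM_SIZE 128u            /* pretend part size, in bytes */
#define SIM_PROBE_LIMIT 4096u           /* upper bound for the probe   */

/* Simulated read: offsets wrap modulo the part size, which is exactly the
 * aliasing the probe relies on.  The magic signature lives at offset 0.
 */
static uint32_t sim_read(uint32_t off)
{
        off %= SIM_EEPROM_SIZE;
        return off == 0 ? 0x669955aau : off;
}

int main(void)
{
        uint32_t magic = sim_read(0);
        uint32_t cursize = 0x10;

        /* Double the offset until the signature reappears: the address
         * space has wrapped, so cursize is the part size (128 here).
         */
        while (cursize < SIM_PROBE_LIMIT && sim_read(cursize) != magic)
                cursize <<= 1;

        printf("detected size: %u bytes\n", (unsigned)cursize);
        return 0;
}
#endif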
12643
12644 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12645 {
12646         u32 val;
12647
12648         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12649                 return;
12650
12651         /* Selfboot format */
12652         if (val != TG3_EEPROM_MAGIC) {
12653                 tg3_get_eeprom_size(tp);
12654                 return;
12655         }
12656
12657         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12658                 if (val != 0) {
12659                         /* This is confusing.  We want to operate on the
12660                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12661                          * call will read from NVRAM and byteswap the data
12662                          * according to the byteswapping settings for all
12663                          * other register accesses.  This ensures the data we
12664                          * want will always reside in the lower 16-bits.
12665                          * However, the data in NVRAM is in LE format, which
12666                          * means the data from the NVRAM read will always be
12667                          * opposite the endianness of the CPU.  The 16-bit
12668                          * byteswap then brings the data to CPU endianness.
12669                          */
12670                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12671                         return;
12672                 }
12673         }
12674         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12675 }
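
/* A minimal arithmetic check of the swab16() step described above, assuming
 * (per the comment) that the size-in-KB halfword arrives in the low 16 bits
 * byte-swapped relative to CPU order.  Illustrative only, not driver code;
 * disabled with #if 0:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint16_t swab16_demo(uint16_t x)        /* same op as kernel swab16() */
{
        return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
        uint32_t val = 0x00000002;      /* 0x0200 (512 KB) arriving swapped */
        uint32_t size = swab16_demo((uint16_t)(val & 0xffff)) * 1024u;

        printf("nvram_size = %u bytes\n", (unsigned)size);      /* 524288 */
        return 0;
}
#endif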
12676
12677 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12678 {
12679         u32 nvcfg1;
12680
12681         nvcfg1 = tr32(NVRAM_CFG1);
12682         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12683                 tg3_flag_set(tp, FLASH);
12684         } else {
12685                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12686                 tw32(NVRAM_CFG1, nvcfg1);
12687         }
12688
12689         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12690             tg3_flag(tp, 5780_CLASS)) {
12691                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12692                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12693                         tp->nvram_jedecnum = JEDEC_ATMEL;
12694                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12695                         tg3_flag_set(tp, NVRAM_BUFFERED);
12696                         break;
12697                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12698                         tp->nvram_jedecnum = JEDEC_ATMEL;
12699                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12700                         break;
12701                 case FLASH_VENDOR_ATMEL_EEPROM:
12702                         tp->nvram_jedecnum = JEDEC_ATMEL;
12703                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12704                         tg3_flag_set(tp, NVRAM_BUFFERED);
12705                         break;
12706                 case FLASH_VENDOR_ST:
12707                         tp->nvram_jedecnum = JEDEC_ST;
12708                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12709                         tg3_flag_set(tp, NVRAM_BUFFERED);
12710                         break;
12711                 case FLASH_VENDOR_SAIFUN:
12712                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12713                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12714                         break;
12715                 case FLASH_VENDOR_SST_SMALL:
12716                 case FLASH_VENDOR_SST_LARGE:
12717                         tp->nvram_jedecnum = JEDEC_SST;
12718                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12719                         break;
12720                 }
12721         } else {
12722                 tp->nvram_jedecnum = JEDEC_ATMEL;
12723                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12724                 tg3_flag_set(tp, NVRAM_BUFFERED);
12725         }
12726 }
12727
12728 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12729 {
12730         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12731         case FLASH_5752PAGE_SIZE_256:
12732                 tp->nvram_pagesize = 256;
12733                 break;
12734         case FLASH_5752PAGE_SIZE_512:
12735                 tp->nvram_pagesize = 512;
12736                 break;
12737         case FLASH_5752PAGE_SIZE_1K:
12738                 tp->nvram_pagesize = 1024;
12739                 break;
12740         case FLASH_5752PAGE_SIZE_2K:
12741                 tp->nvram_pagesize = 2048;
12742                 break;
12743         case FLASH_5752PAGE_SIZE_4K:
12744                 tp->nvram_pagesize = 4096;
12745                 break;
12746         case FLASH_5752PAGE_SIZE_264:
12747                 tp->nvram_pagesize = 264;
12748                 break;
12749         case FLASH_5752PAGE_SIZE_528:
12750                 tp->nvram_pagesize = 528;
12751                 break;
12752         }
12753 }
12754
12755 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12756 {
12757         u32 nvcfg1;
12758
12759         nvcfg1 = tr32(NVRAM_CFG1);
12760
12761         /* NVRAM protection for TPM */
12762         if (nvcfg1 & (1 << 27))
12763                 tg3_flag_set(tp, PROTECTED_NVRAM);
12764
12765         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12766         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12767         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12768                 tp->nvram_jedecnum = JEDEC_ATMEL;
12769                 tg3_flag_set(tp, NVRAM_BUFFERED);
12770                 break;
12771         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12772                 tp->nvram_jedecnum = JEDEC_ATMEL;
12773                 tg3_flag_set(tp, NVRAM_BUFFERED);
12774                 tg3_flag_set(tp, FLASH);
12775                 break;
12776         case FLASH_5752VENDOR_ST_M45PE10:
12777         case FLASH_5752VENDOR_ST_M45PE20:
12778         case FLASH_5752VENDOR_ST_M45PE40:
12779                 tp->nvram_jedecnum = JEDEC_ST;
12780                 tg3_flag_set(tp, NVRAM_BUFFERED);
12781                 tg3_flag_set(tp, FLASH);
12782                 break;
12783         }
12784
12785         if (tg3_flag(tp, FLASH)) {
12786                 tg3_nvram_get_pagesize(tp, nvcfg1);
12787         } else {
12788                 /* For eeprom, set pagesize to maximum eeprom size */
12789                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12790
12791                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12792                 tw32(NVRAM_CFG1, nvcfg1);
12793         }
12794 }
12795
12796 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12797 {
12798         u32 nvcfg1, protect = 0;
12799
12800         nvcfg1 = tr32(NVRAM_CFG1);
12801
12802         /* NVRAM protection for TPM */
12803         if (nvcfg1 & (1 << 27)) {
12804                 tg3_flag_set(tp, PROTECTED_NVRAM);
12805                 protect = 1;
12806         }
12807
12808         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12809         switch (nvcfg1) {
12810         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12811         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12812         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12813         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12814                 tp->nvram_jedecnum = JEDEC_ATMEL;
12815                 tg3_flag_set(tp, NVRAM_BUFFERED);
12816                 tg3_flag_set(tp, FLASH);
12817                 tp->nvram_pagesize = 264;
12818                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12819                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12820                         tp->nvram_size = (protect ? 0x3e200 :
12821                                           TG3_NVRAM_SIZE_512KB);
12822                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12823                         tp->nvram_size = (protect ? 0x1f200 :
12824                                           TG3_NVRAM_SIZE_256KB);
12825                 else
12826                         tp->nvram_size = (protect ? 0x1f200 :
12827                                           TG3_NVRAM_SIZE_128KB);
12828                 break;
12829         case FLASH_5752VENDOR_ST_M45PE10:
12830         case FLASH_5752VENDOR_ST_M45PE20:
12831         case FLASH_5752VENDOR_ST_M45PE40:
12832                 tp->nvram_jedecnum = JEDEC_ST;
12833                 tg3_flag_set(tp, NVRAM_BUFFERED);
12834                 tg3_flag_set(tp, FLASH);
12835                 tp->nvram_pagesize = 256;
12836                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12837                         tp->nvram_size = (protect ?
12838                                           TG3_NVRAM_SIZE_64KB :
12839                                           TG3_NVRAM_SIZE_128KB);
12840                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12841                         tp->nvram_size = (protect ?
12842                                           TG3_NVRAM_SIZE_64KB :
12843                                           TG3_NVRAM_SIZE_256KB);
12844                 else
12845                         tp->nvram_size = (protect ?
12846                                           TG3_NVRAM_SIZE_128KB :
12847                                           TG3_NVRAM_SIZE_512KB);
12848                 break;
12849         }
12850 }
12851
12852 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12853 {
12854         u32 nvcfg1;
12855
12856         nvcfg1 = tr32(NVRAM_CFG1);
12857
12858         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12859         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12860         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12861         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12862         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12863                 tp->nvram_jedecnum = JEDEC_ATMEL;
12864                 tg3_flag_set(tp, NVRAM_BUFFERED);
12865                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12866
12867                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12868                 tw32(NVRAM_CFG1, nvcfg1);
12869                 break;
12870         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12871         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12872         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12873         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12874                 tp->nvram_jedecnum = JEDEC_ATMEL;
12875                 tg3_flag_set(tp, NVRAM_BUFFERED);
12876                 tg3_flag_set(tp, FLASH);
12877                 tp->nvram_pagesize = 264;
12878                 break;
12879         case FLASH_5752VENDOR_ST_M45PE10:
12880         case FLASH_5752VENDOR_ST_M45PE20:
12881         case FLASH_5752VENDOR_ST_M45PE40:
12882                 tp->nvram_jedecnum = JEDEC_ST;
12883                 tg3_flag_set(tp, NVRAM_BUFFERED);
12884                 tg3_flag_set(tp, FLASH);
12885                 tp->nvram_pagesize = 256;
12886                 break;
12887         }
12888 }
12889
12890 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12891 {
12892         u32 nvcfg1, protect = 0;
12893
12894         nvcfg1 = tr32(NVRAM_CFG1);
12895
12896         /* NVRAM protection for TPM */
12897         if (nvcfg1 & (1 << 27)) {
12898                 tg3_flag_set(tp, PROTECTED_NVRAM);
12899                 protect = 1;
12900         }
12901
12902         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12903         switch (nvcfg1) {
12904         case FLASH_5761VENDOR_ATMEL_ADB021D:
12905         case FLASH_5761VENDOR_ATMEL_ADB041D:
12906         case FLASH_5761VENDOR_ATMEL_ADB081D:
12907         case FLASH_5761VENDOR_ATMEL_ADB161D:
12908         case FLASH_5761VENDOR_ATMEL_MDB021D:
12909         case FLASH_5761VENDOR_ATMEL_MDB041D:
12910         case FLASH_5761VENDOR_ATMEL_MDB081D:
12911         case FLASH_5761VENDOR_ATMEL_MDB161D:
12912                 tp->nvram_jedecnum = JEDEC_ATMEL;
12913                 tg3_flag_set(tp, NVRAM_BUFFERED);
12914                 tg3_flag_set(tp, FLASH);
12915                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12916                 tp->nvram_pagesize = 256;
12917                 break;
12918         case FLASH_5761VENDOR_ST_A_M45PE20:
12919         case FLASH_5761VENDOR_ST_A_M45PE40:
12920         case FLASH_5761VENDOR_ST_A_M45PE80:
12921         case FLASH_5761VENDOR_ST_A_M45PE16:
12922         case FLASH_5761VENDOR_ST_M_M45PE20:
12923         case FLASH_5761VENDOR_ST_M_M45PE40:
12924         case FLASH_5761VENDOR_ST_M_M45PE80:
12925         case FLASH_5761VENDOR_ST_M_M45PE16:
12926                 tp->nvram_jedecnum = JEDEC_ST;
12927                 tg3_flag_set(tp, NVRAM_BUFFERED);
12928                 tg3_flag_set(tp, FLASH);
12929                 tp->nvram_pagesize = 256;
12930                 break;
12931         }
12932
12933         if (protect) {
12934                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12935         } else {
12936                 switch (nvcfg1) {
12937                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12938                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12939                 case FLASH_5761VENDOR_ST_A_M45PE16:
12940                 case FLASH_5761VENDOR_ST_M_M45PE16:
12941                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12942                         break;
12943                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12944                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12945                 case FLASH_5761VENDOR_ST_A_M45PE80:
12946                 case FLASH_5761VENDOR_ST_M_M45PE80:
12947                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12948                         break;
12949                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12950                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12951                 case FLASH_5761VENDOR_ST_A_M45PE40:
12952                 case FLASH_5761VENDOR_ST_M_M45PE40:
12953                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12954                         break;
12955                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12956                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12957                 case FLASH_5761VENDOR_ST_A_M45PE20:
12958                 case FLASH_5761VENDOR_ST_M_M45PE20:
12959                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12960                         break;
12961                 }
12962         }
12963 }
12964
12965 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12966 {
12967         tp->nvram_jedecnum = JEDEC_ATMEL;
12968         tg3_flag_set(tp, NVRAM_BUFFERED);
12969         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12970 }
12971
12972 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12973 {
12974         u32 nvcfg1;
12975
12976         nvcfg1 = tr32(NVRAM_CFG1);
12977
12978         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12979         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12980         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12981                 tp->nvram_jedecnum = JEDEC_ATMEL;
12982                 tg3_flag_set(tp, NVRAM_BUFFERED);
12983                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12984
12985                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12986                 tw32(NVRAM_CFG1, nvcfg1);
12987                 return;
12988         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12989         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12990         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12991         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12992         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12993         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12994         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12995                 tp->nvram_jedecnum = JEDEC_ATMEL;
12996                 tg3_flag_set(tp, NVRAM_BUFFERED);
12997                 tg3_flag_set(tp, FLASH);
12998
12999                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13000                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13001                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13002                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13003                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13004                         break;
13005                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13006                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13007                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13008                         break;
13009                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13010                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13011                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13012                         break;
13013                 }
13014                 break;
13015         case FLASH_5752VENDOR_ST_M45PE10:
13016         case FLASH_5752VENDOR_ST_M45PE20:
13017         case FLASH_5752VENDOR_ST_M45PE40:
13018                 tp->nvram_jedecnum = JEDEC_ST;
13019                 tg3_flag_set(tp, NVRAM_BUFFERED);
13020                 tg3_flag_set(tp, FLASH);
13021
13022                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13023                 case FLASH_5752VENDOR_ST_M45PE10:
13024                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13025                         break;
13026                 case FLASH_5752VENDOR_ST_M45PE20:
13027                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13028                         break;
13029                 case FLASH_5752VENDOR_ST_M45PE40:
13030                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13031                         break;
13032                 }
13033                 break;
13034         default:
13035                 tg3_flag_set(tp, NO_NVRAM);
13036                 return;
13037         }
13038
13039         tg3_nvram_get_pagesize(tp, nvcfg1);
13040         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13041                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13042 }
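
/* The 264- and 528-byte page sizes above belong to Atmel AT45DB "DataFlash"
 * parts, which are addressed as page-number plus byte-within-page rather
 * than by flat offset; every other (power-of-two) page size gets
 * NO_NVRAM_ADDR_TRANS.  A sketch of that translation, assuming the usual
 * AT45DB encoding of 9 address bits per 264-byte page and 10 per 528-byte
 * page (illustrative only, not driver code; disabled with #if 0):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t at45db_addr(uint32_t offset, uint32_t pagesize)
{
        uint32_t page  = offset / pagesize;
        uint32_t byte  = offset % pagesize;
        uint32_t shift = (pagesize == 264) ? 9 : 10;

        return (page << shift) | byte;  /* page field above the byte field */
}

int main(void)
{
        /* Flat offset 1000 on a 264-byte-page part: page 3, byte 208. */
        printf("0x%x\n", (unsigned)at45db_addr(1000, 264));     /* 0x6d0 */
        return 0;
}
#endif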
13043
13044
13045 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
13046 {
13047         u32 nvcfg1;
13048
13049         nvcfg1 = tr32(NVRAM_CFG1);
13050
13051         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13052         case FLASH_5717VENDOR_ATMEL_EEPROM:
13053         case FLASH_5717VENDOR_MICRO_EEPROM:
13054                 tp->nvram_jedecnum = JEDEC_ATMEL;
13055                 tg3_flag_set(tp, NVRAM_BUFFERED);
13056                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13057
13058                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13059                 tw32(NVRAM_CFG1, nvcfg1);
13060                 return;
13061         case FLASH_5717VENDOR_ATMEL_MDB011D:
13062         case FLASH_5717VENDOR_ATMEL_ADB011B:
13063         case FLASH_5717VENDOR_ATMEL_ADB011D:
13064         case FLASH_5717VENDOR_ATMEL_MDB021D:
13065         case FLASH_5717VENDOR_ATMEL_ADB021B:
13066         case FLASH_5717VENDOR_ATMEL_ADB021D:
13067         case FLASH_5717VENDOR_ATMEL_45USPT:
13068                 tp->nvram_jedecnum = JEDEC_ATMEL;
13069                 tg3_flag_set(tp, NVRAM_BUFFERED);
13070                 tg3_flag_set(tp, FLASH);
13071
13072                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13073                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13074                         /* Detect size with tg3_get_nvram_size() */
13075                         break;
13076                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13077                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13078                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13079                         break;
13080                 default:
13081                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13082                         break;
13083                 }
13084                 break;
13085         case FLASH_5717VENDOR_ST_M_M25PE10:
13086         case FLASH_5717VENDOR_ST_A_M25PE10:
13087         case FLASH_5717VENDOR_ST_M_M45PE10:
13088         case FLASH_5717VENDOR_ST_A_M45PE10:
13089         case FLASH_5717VENDOR_ST_M_M25PE20:
13090         case FLASH_5717VENDOR_ST_A_M25PE20:
13091         case FLASH_5717VENDOR_ST_M_M45PE20:
13092         case FLASH_5717VENDOR_ST_A_M45PE20:
13093         case FLASH_5717VENDOR_ST_25USPT:
13094         case FLASH_5717VENDOR_ST_45USPT:
13095                 tp->nvram_jedecnum = JEDEC_ST;
13096                 tg3_flag_set(tp, NVRAM_BUFFERED);
13097                 tg3_flag_set(tp, FLASH);
13098
13099                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13100                 case FLASH_5717VENDOR_ST_M_M25PE20:
13101                 case FLASH_5717VENDOR_ST_M_M45PE20:
13102                         /* Detect size with tg3_get_nvram_size() */
13103                         break;
13104                 case FLASH_5717VENDOR_ST_A_M25PE20:
13105                 case FLASH_5717VENDOR_ST_A_M45PE20:
13106                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13107                         break;
13108                 default:
13109                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13110                         break;
13111                 }
13112                 break;
13113         default:
13114                 tg3_flag_set(tp, NO_NVRAM);
13115                 return;
13116         }
13117
13118         tg3_nvram_get_pagesize(tp, nvcfg1);
13119         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13120                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13121 }
13122
13123 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
13124 {
13125         u32 nvcfg1, nvmpinstrp;
13126
13127         nvcfg1 = tr32(NVRAM_CFG1);
13128         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13129
13130         switch (nvmpinstrp) {
13131         case FLASH_5720_EEPROM_HD:
13132         case FLASH_5720_EEPROM_LD:
13133                 tp->nvram_jedecnum = JEDEC_ATMEL;
13134                 tg3_flag_set(tp, NVRAM_BUFFERED);
13135
13136                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13137                 tw32(NVRAM_CFG1, nvcfg1);
13138                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13139                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13140                 else
13141                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13142                 return;
13143         case FLASH_5720VENDOR_M_ATMEL_DB011D:
13144         case FLASH_5720VENDOR_A_ATMEL_DB011B:
13145         case FLASH_5720VENDOR_A_ATMEL_DB011D:
13146         case FLASH_5720VENDOR_M_ATMEL_DB021D:
13147         case FLASH_5720VENDOR_A_ATMEL_DB021B:
13148         case FLASH_5720VENDOR_A_ATMEL_DB021D:
13149         case FLASH_5720VENDOR_M_ATMEL_DB041D:
13150         case FLASH_5720VENDOR_A_ATMEL_DB041B:
13151         case FLASH_5720VENDOR_A_ATMEL_DB041D:
13152         case FLASH_5720VENDOR_M_ATMEL_DB081D:
13153         case FLASH_5720VENDOR_A_ATMEL_DB081D:
13154         case FLASH_5720VENDOR_ATMEL_45USPT:
13155                 tp->nvram_jedecnum = JEDEC_ATMEL;
13156                 tg3_flag_set(tp, NVRAM_BUFFERED);
13157                 tg3_flag_set(tp, FLASH);
13158
13159                 switch (nvmpinstrp) {
13160                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13161                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13162                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13163                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13164                         break;
13165                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13166                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13167                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13168                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13169                         break;
13170                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13171                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13172                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13173                         break;
13174                 default:
13175                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13176                         break;
13177                 }
13178                 break;
13179         case FLASH_5720VENDOR_M_ST_M25PE10:
13180         case FLASH_5720VENDOR_M_ST_M45PE10:
13181         case FLASH_5720VENDOR_A_ST_M25PE10:
13182         case FLASH_5720VENDOR_A_ST_M45PE10:
13183         case FLASH_5720VENDOR_M_ST_M25PE20:
13184         case FLASH_5720VENDOR_M_ST_M45PE20:
13185         case FLASH_5720VENDOR_A_ST_M25PE20:
13186         case FLASH_5720VENDOR_A_ST_M45PE20:
13187         case FLASH_5720VENDOR_M_ST_M25PE40:
13188         case FLASH_5720VENDOR_M_ST_M45PE40:
13189         case FLASH_5720VENDOR_A_ST_M25PE40:
13190         case FLASH_5720VENDOR_A_ST_M45PE40:
13191         case FLASH_5720VENDOR_M_ST_M25PE80:
13192         case FLASH_5720VENDOR_M_ST_M45PE80:
13193         case FLASH_5720VENDOR_A_ST_M25PE80:
13194         case FLASH_5720VENDOR_A_ST_M45PE80:
13195         case FLASH_5720VENDOR_ST_25USPT:
13196         case FLASH_5720VENDOR_ST_45USPT:
13197                 tp->nvram_jedecnum = JEDEC_ST;
13198                 tg3_flag_set(tp, NVRAM_BUFFERED);
13199                 tg3_flag_set(tp, FLASH);
13200
13201                 switch (nvmpinstrp) {
13202                 case FLASH_5720VENDOR_M_ST_M25PE20:
13203                 case FLASH_5720VENDOR_M_ST_M45PE20:
13204                 case FLASH_5720VENDOR_A_ST_M25PE20:
13205                 case FLASH_5720VENDOR_A_ST_M45PE20:
13206                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13207                         break;
13208                 case FLASH_5720VENDOR_M_ST_M25PE40:
13209                 case FLASH_5720VENDOR_M_ST_M45PE40:
13210                 case FLASH_5720VENDOR_A_ST_M25PE40:
13211                 case FLASH_5720VENDOR_A_ST_M45PE40:
13212                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13213                         break;
13214                 case FLASH_5720VENDOR_M_ST_M25PE80:
13215                 case FLASH_5720VENDOR_M_ST_M45PE80:
13216                 case FLASH_5720VENDOR_A_ST_M25PE80:
13217                 case FLASH_5720VENDOR_A_ST_M45PE80:
13218                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13219                         break;
13220                 default:
13221                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13222                         break;
13223                 }
13224                 break;
13225         default:
13226                 tg3_flag_set(tp, NO_NVRAM);
13227                 return;
13228         }
13229
13230         tg3_nvram_get_pagesize(tp, nvcfg1);
13231         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13232                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13233 }
13234
13235 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13236 static void __devinit tg3_nvram_init(struct tg3 *tp)
13237 {
13238         tw32_f(GRC_EEPROM_ADDR,
13239              (EEPROM_ADDR_FSM_RESET |
13240               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13241                EEPROM_ADDR_CLKPERD_SHIFT)));
13242
13243         msleep(1);
13244
13245         /* Enable serial EEPROM (seeprom) accesses. */
13246         tw32_f(GRC_LOCAL_CTRL,
13247              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13248         udelay(100);
13249
13250         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13251             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13252                 tg3_flag_set(tp, NVRAM);
13253
13254                 if (tg3_nvram_lock(tp)) {
13255                         netdev_warn(tp->dev,
13256                                     "Cannot get nvram lock, %s failed\n",
13257                                     __func__);
13258                         return;
13259                 }
13260                 tg3_enable_nvram_access(tp);
13261
13262                 tp->nvram_size = 0;
13263
13264                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13265                         tg3_get_5752_nvram_info(tp);
13266                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13267                         tg3_get_5755_nvram_info(tp);
13268                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13269                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13270                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13271                         tg3_get_5787_nvram_info(tp);
13272                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13273                         tg3_get_5761_nvram_info(tp);
13274                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13275                         tg3_get_5906_nvram_info(tp);
13276                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13277                          tg3_flag(tp, 57765_CLASS))
13278                         tg3_get_57780_nvram_info(tp);
13279                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13280                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13281                         tg3_get_5717_nvram_info(tp);
13282                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13283                         tg3_get_5720_nvram_info(tp);
13284                 else
13285                         tg3_get_nvram_info(tp);
13286
13287                 if (tp->nvram_size == 0)
13288                         tg3_get_nvram_size(tp);
13289
13290                 tg3_disable_nvram_access(tp);
13291                 tg3_nvram_unlock(tp);
13292
13293         } else {
13294                 tg3_flag_clear(tp, NVRAM);
13295                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13296
13297                 tg3_get_eeprom_size(tp);
13298         }
13299 }
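
/* A compressed view of the bracketing tg3_nvram_init() uses: every probe
 * runs strictly between lock+enable-access and the matching
 * disable-access+unlock, and a failed lock aborts the probe without
 * touching the hardware.  All helpers are hypothetical stand-ins
 * (illustrative only, not driver code; disabled with #if 0):
 */
#if 0
#include <stdio.h>

static int  lock_nvram(void)     { return 0; }  /* nonzero = failure */
static void unlock_nvram(void)   { }
static void enable_access(void)  { }
static void disable_access(void) { }

static int probe_nvram(int *size)
{
        if (lock_nvram())
                return -1;              /* bail before touching hardware */
        enable_access();

        *size = 512 * 1024;             /* ...chip-specific detection... */

        disable_access();
        unlock_nvram();
        return 0;
}

int main(void)
{
        int size = 0;
        printf("rc=%d size=%d\n", probe_nvram(&size), size);
        return 0;
}
#endif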
13300
13301 struct subsys_tbl_ent {
13302         u16 subsys_vendor, subsys_devid;
13303         u32 phy_id;
13304 };
13305
13306 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13307         /* Broadcom boards. */
13308         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13309           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13310         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13311           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13312         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13313           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13314         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13315           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13316         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13317           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13318         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13319           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13320         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13321           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13322         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13323           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13324         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13325           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13326         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13327           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13328         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13329           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13330
13331         /* 3com boards. */
13332         { TG3PCI_SUBVENDOR_ID_3COM,
13333           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13334         { TG3PCI_SUBVENDOR_ID_3COM,
13335           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13336         { TG3PCI_SUBVENDOR_ID_3COM,
13337           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13338         { TG3PCI_SUBVENDOR_ID_3COM,
13339           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13340         { TG3PCI_SUBVENDOR_ID_3COM,
13341           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13342
13343         /* DELL boards. */
13344         { TG3PCI_SUBVENDOR_ID_DELL,
13345           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13346         { TG3PCI_SUBVENDOR_ID_DELL,
13347           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13348         { TG3PCI_SUBVENDOR_ID_DELL,
13349           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13350         { TG3PCI_SUBVENDOR_ID_DELL,
13351           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13352
13353         /* Compaq boards. */
13354         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13355           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13356         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13357           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13358         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13359           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13360         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13361           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13362         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13363           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13364
13365         /* IBM boards. */
13366         { TG3PCI_SUBVENDOR_ID_IBM,
13367           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13368 };
13369
13370 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13371 {
13372         int i;
13373
13374         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13375                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13376                      tp->pdev->subsystem_vendor) &&
13377                     (subsys_id_to_phy_id[i].subsys_devid ==
13378                      tp->pdev->subsystem_device))
13379                         return &subsys_id_to_phy_id[i];
13380         }
13381         return NULL;
13382 }
13383
13384 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13385 {
13386         u32 val;
13387
13388         tp->phy_id = TG3_PHY_ID_INVALID;
13389         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13390
13391         /* Assume an onboard, WOL-capable device by default. */
13392         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13393         tg3_flag_set(tp, WOL_CAP);
13394
13395         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13396                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13397                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13398                         tg3_flag_set(tp, IS_NIC);
13399                 }
13400                 val = tr32(VCPU_CFGSHDW);
13401                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13402                         tg3_flag_set(tp, ASPM_WORKAROUND);
13403                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13404                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13405                         tg3_flag_set(tp, WOL_ENABLE);
13406                         device_set_wakeup_enable(&tp->pdev->dev, true);
13407                 }
13408                 goto done;
13409         }
13410
13411         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13412         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13413                 u32 nic_cfg, led_cfg;
13414                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13415                 int eeprom_phy_serdes = 0;
13416
13417                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13418                 tp->nic_sram_data_cfg = nic_cfg;
13419
13420                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13421                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13422                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13423                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13424                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13425                     (ver > 0) && (ver < 0x100))
13426                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13427
13428                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13429                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13430
13431                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13432                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13433                         eeprom_phy_serdes = 1;
13434
13435                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13436                 if (nic_phy_id != 0) {
13437                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13438                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13439
13440                         eeprom_phy_id  = (id1 >> 16) << 10;
13441                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13442                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13443                 } else
13444                         eeprom_phy_id = 0;
13445
13446                 tp->phy_id = eeprom_phy_id;
13447                 if (eeprom_phy_serdes) {
13448                         if (!tg3_flag(tp, 5705_PLUS))
13449                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13450                         else
13451                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13452                 }
13453
13454                 if (tg3_flag(tp, 5750_PLUS))
13455                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13456                                     SHASTA_EXT_LED_MODE_MASK);
13457                 else
13458                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13459
13460                 switch (led_cfg) {
13461                 default:
13462                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13463                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13464                         break;
13465
13466                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13467                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13468                         break;
13469
13470                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13471                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13472
13473                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13474                          * read on some older 5700/5701 bootcode.
13475                          */
13476                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13477                             ASIC_REV_5700 ||
13478                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13479                             ASIC_REV_5701)
13480                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13481
13482                         break;
13483
13484                 case SHASTA_EXT_LED_SHARED:
13485                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13486                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13487                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13488                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13489                                                  LED_CTRL_MODE_PHY_2);
13490                         break;
13491
13492                 case SHASTA_EXT_LED_MAC:
13493                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13494                         break;
13495
13496                 case SHASTA_EXT_LED_COMBO:
13497                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13498                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13499                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13500                                                  LED_CTRL_MODE_PHY_2);
13501                         break;
13502
13503                 }
13504
13505                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13506                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13507                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13508                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13509
13510                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13511                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13512
13513                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13514                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13515                         if ((tp->pdev->subsystem_vendor ==
13516                              PCI_VENDOR_ID_ARIMA) &&
13517                             (tp->pdev->subsystem_device == 0x205a ||
13518                              tp->pdev->subsystem_device == 0x2063))
13519                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13520                 } else {
13521                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13522                         tg3_flag_set(tp, IS_NIC);
13523                 }
13524
13525                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13526                         tg3_flag_set(tp, ENABLE_ASF);
13527                         if (tg3_flag(tp, 5750_PLUS))
13528                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13529                 }
13530
13531                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13532                     tg3_flag(tp, 5750_PLUS))
13533                         tg3_flag_set(tp, ENABLE_APE);
13534
13535                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13536                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13537                         tg3_flag_clear(tp, WOL_CAP);
13538
13539                 if (tg3_flag(tp, WOL_CAP) &&
13540                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13541                         tg3_flag_set(tp, WOL_ENABLE);
13542                         device_set_wakeup_enable(&tp->pdev->dev, true);
13543                 }
13544
13545                 if (cfg2 & (1 << 17))
13546                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13547
13548                 /* SerDes signal pre-emphasis in register 0x590 is set
13549                  * by the bootcode if bit 18 is set. */
13550                 if (cfg2 & (1 << 18))
13551                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13552
13553                 if ((tg3_flag(tp, 57765_PLUS) ||
13554                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13555                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13556                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13557                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13558
13559                 if (tg3_flag(tp, PCI_EXPRESS) &&
13560                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13561                     !tg3_flag(tp, 57765_PLUS)) {
13562                         u32 cfg3;
13563
13564                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13565                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13566                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13567                 }
13568
13569                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13570                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13571                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13572                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13573                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13574                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13575         }
13576 done:
13577         if (tg3_flag(tp, WOL_CAP))
13578                 device_set_wakeup_enable(&tp->pdev->dev,
13579                                          tg3_flag(tp, WOL_ENABLE));
13580         else
13581                 device_set_wakeup_capable(&tp->pdev->dev, false);
13582 }
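
/* This function (from NIC SRAM words) and tg3_phy_probe() below (from the
 * MII ID registers) build the same internal 32-bit PHY id: the 16 OUI bits
 * from PHYSID1 land in bits 10..25, PHYSID2's top 6 OUI bits in bits
 * 26..31, and PHYSID2's model/revision bits stay in bits 0..9.  A worked
 * example with made-up register values; the 0xfffffff0 mask is assumed here
 * to mirror TG3_PHY_ID_MASK, which drops the 4-bit revision.  Illustrative
 * only, not driver code; disabled with #if 0:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t pack_phy_id(uint32_t physid1, uint32_t physid2)
{
        uint32_t id;

        id  = (physid1 & 0xffff) << 10;         /* OUI bits from PHYSID1  */
        id |= (physid2 & 0xfc00) << 16;         /* top OUI bits           */
        id |= (physid2 & 0x03ff);               /* model + revision       */
        return id;
}

int main(void)
{
        uint32_t id = pack_phy_id(0x0020, 0x60b3);      /* made-up values */

        printf("phy_id=0x%08x masked=0x%08x\n",
               (unsigned)id, (unsigned)(id & 0xfffffff0u));
        return 0;
}
#endif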
13583
13584 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13585 {
13586         int i;
13587         u32 val;
13588
13589         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13590         tw32(OTP_CTRL, cmd);
13591
13592         /* Wait for up to 1 ms for command to execute. */
13593         for (i = 0; i < 100; i++) {
13594                 val = tr32(OTP_STATUS);
13595                 if (val & OTP_STATUS_CMD_DONE)
13596                         break;
13597                 udelay(10);
13598         }
13599
13600         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13601 }
13602
13603 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13604  * configuration is a 32-bit value that straddles the alignment boundary.
13605  * We do two 32-bit reads and then shift and merge the results.
13606  */
13607 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13608 {
13609         u32 bhalf_otp, thalf_otp;
13610
13611         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13612
13613         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13614                 return 0;
13615
13616         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13617
13618         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13619                 return 0;
13620
13621         thalf_otp = tr32(OTP_READ_DATA);
13622
13623         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13624
13625         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13626                 return 0;
13627
13628         bhalf_otp = tr32(OTP_READ_DATA);
13629
13630         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13631 }
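
/* A worked example of the shift-and-merge above with made-up OTP words:
 * the wanted 32-bit value straddles two aligned words, its top half in the
 * low 16 bits of the first and its bottom half in the high 16 bits of the
 * second.  Illustrative only, not driver code; disabled with #if 0:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t thalf_otp = 0xaaaa1234;        /* keep the low 16 bits  */
        uint32_t bhalf_otp = 0x5678bbbb;        /* keep the high 16 bits */
        uint32_t cfg = ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);

        printf("phycfg = 0x%08x\n", (unsigned)cfg);     /* 0x12345678 */
        return 0;
}
#endif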
13632
13633 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13634 {
13635         u32 adv = ADVERTISED_Autoneg;
13636
13637         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13638                 adv |= ADVERTISED_1000baseT_Half |
13639                        ADVERTISED_1000baseT_Full;
13640
13641         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13642                 adv |= ADVERTISED_100baseT_Half |
13643                        ADVERTISED_100baseT_Full |
13644                        ADVERTISED_10baseT_Half |
13645                        ADVERTISED_10baseT_Full |
13646                        ADVERTISED_TP;
13647         else
13648                 adv |= ADVERTISED_FIBRE;
13649
13650         tp->link_config.advertising = adv;
13651         tp->link_config.speed = SPEED_UNKNOWN;
13652         tp->link_config.duplex = DUPLEX_UNKNOWN;
13653         tp->link_config.autoneg = AUTONEG_ENABLE;
13654         tp->link_config.active_speed = SPEED_UNKNOWN;
13655         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13656
13657         tp->old_link = -1;
13658 }
13659
13660 static int __devinit tg3_phy_probe(struct tg3 *tp)
13661 {
13662         u32 hw_phy_id_1, hw_phy_id_2;
13663         u32 hw_phy_id, hw_phy_id_masked;
13664         int err;
13665
13666         /* flow control autonegotiation is default behavior */
13667         tg3_flag_set(tp, PAUSE_AUTONEG);
13668         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13669
13670         if (tg3_flag(tp, ENABLE_APE)) {
13671                 switch (tp->pci_fn) {
13672                 case 0:
13673                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
13674                         break;
13675                 case 1:
13676                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
13677                         break;
13678                 case 2:
13679                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
13680                         break;
13681                 case 3:
13682                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
13683                         break;
13684                 }
13685         }
13686
13687         if (tg3_flag(tp, USE_PHYLIB))
13688                 return tg3_phy_init(tp);
13689
13690         /* Reading the PHY ID register can conflict with ASF
13691          * firmware access to the PHY hardware.
13692          */
13693         err = 0;
13694         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13695                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13696         } else {
13697                 /* Now read the physical PHY_ID from the chip and verify
13698                  * that it is sane.  If it doesn't look good, we fall back
13699                  * to the PHY_ID found in the eeprom area, and failing
13700                  * that, the hard-coded subsystem-ID table.
13701                  */
13702                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13703                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13704
13705                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13706                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13707                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13708
13709                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13710         }
13711
13712         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13713                 tp->phy_id = hw_phy_id;
13714                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13715                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13716                 else
13717                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13718         } else {
13719                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13720                         /* Do nothing, phy ID already set up in
13721                          * tg3_get_eeprom_hw_cfg().
13722                          */
13723                 } else {
13724                         struct subsys_tbl_ent *p;
13725
13726                         /* No eeprom signature?  Try the hardcoded
13727                          * subsys device table.
13728                          */
13729                         p = tg3_lookup_by_subsys(tp);
13730                         if (!p)
13731                                 return -ENODEV;
13732
13733                         tp->phy_id = p->phy_id;
13734                         if (!tp->phy_id ||
13735                             tp->phy_id == TG3_PHY_ID_BCM8002)
13736                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13737                 }
13738         }
13739
13740         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13741             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13742              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13743              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13744               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13745              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13746               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13747                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13748
13749         tg3_phy_init_link_config(tp);
13750
13751         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13752             !tg3_flag(tp, ENABLE_APE) &&
13753             !tg3_flag(tp, ENABLE_ASF)) {
13754                 u32 bmsr, dummy;
13755
13756                 tg3_readphy(tp, MII_BMSR, &bmsr);
13757                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13758                     (bmsr & BMSR_LSTATUS))
13759                         goto skip_phy_reset;
13760
13761                 err = tg3_phy_reset(tp);
13762                 if (err)
13763                         return err;
13764
13765                 tg3_phy_set_wirespeed(tp);
13766
13767                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13768                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13769                                             tp->link_config.flowctrl);
13770
13771                         tg3_writephy(tp, MII_BMCR,
13772                                      BMCR_ANENABLE | BMCR_ANRESTART);
13773                 }
13774         }
13775
13776 skip_phy_reset:
13777         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13778                 err = tg3_init_5401phy_dsp(tp);
13779                 if (err)
13780                         return err;
13781
13782                 err = tg3_init_5401phy_dsp(tp);
13783         }
13784
13785         return err;
13786 }
13787
13788 static void __devinit tg3_read_vpd(struct tg3 *tp)
13789 {
13790         u8 *vpd_data;
13791         unsigned int block_end, rosize, len;
13792         u32 vpdlen;
13793         int j, i = 0;
13794
13795         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13796         if (!vpd_data)
13797                 goto out_no_vpd;
13798
13799         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13800         if (i < 0)
13801                 goto out_not_found;
13802
13803         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13804         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13805         i += PCI_VPD_LRDT_TAG_SIZE;
13806
13807         if (block_end > vpdlen)
13808                 goto out_not_found;
13809
13810         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13811                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13812         if (j > 0) {
13813                 len = pci_vpd_info_field_size(&vpd_data[j]);
13814
13815                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13816                 if (j + len > block_end || len != 4 ||
13817                     memcmp(&vpd_data[j], "1028", 4))
13818                         goto partno;
13819
13820                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13821                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13822                 if (j < 0)
13823                         goto partno;
13824
13825                 len = pci_vpd_info_field_size(&vpd_data[j]);
13826
13827                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13828                 if (j + len > block_end)
13829                         goto partno;
13830
13831                 memcpy(tp->fw_ver, &vpd_data[j], len);
13832                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13833         }
13834
13835 partno:
13836         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13837                                       PCI_VPD_RO_KEYWORD_PARTNO);
13838         if (i < 0)
13839                 goto out_not_found;
13840
13841         len = pci_vpd_info_field_size(&vpd_data[i]);
13842
13843         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13844         if (len > TG3_BPN_SIZE ||
13845             (len + i) > vpdlen)
13846                 goto out_not_found;
13847
13848         memcpy(tp->board_part_number, &vpd_data[i], len);
13849
13850 out_not_found:
13851         kfree(vpd_data);
13852         if (tp->board_part_number[0])
13853                 return;
13854
13855 out_no_vpd:
13856         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13857                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13858                         strcpy(tp->board_part_number, "BCM5717");
13859                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13860                         strcpy(tp->board_part_number, "BCM5718");
13861                 else
13862                         goto nomatch;
13863         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13864                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13865                         strcpy(tp->board_part_number, "BCM57780");
13866                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13867                         strcpy(tp->board_part_number, "BCM57760");
13868                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13869                         strcpy(tp->board_part_number, "BCM57790");
13870                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13871                         strcpy(tp->board_part_number, "BCM57788");
13872                 else
13873                         goto nomatch;
13874         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13875                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13876                         strcpy(tp->board_part_number, "BCM57761");
13877                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13878                         strcpy(tp->board_part_number, "BCM57765");
13879                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13880                         strcpy(tp->board_part_number, "BCM57781");
13881                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13882                         strcpy(tp->board_part_number, "BCM57785");
13883                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13884                         strcpy(tp->board_part_number, "BCM57791");
13885                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13886                         strcpy(tp->board_part_number, "BCM57795");
13887                 else
13888                         goto nomatch;
13889         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13890                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13891                         strcpy(tp->board_part_number, "BCM57762");
13892                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13893                         strcpy(tp->board_part_number, "BCM57766");
13894                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13895                         strcpy(tp->board_part_number, "BCM57782");
13896                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13897                         strcpy(tp->board_part_number, "BCM57786");
13898                 else
13899                         goto nomatch;
13900         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13901                 strcpy(tp->board_part_number, "BCM95906");
13902         } else {
13903 nomatch:
13904                 strcpy(tp->board_part_number, "none");
13905         }
13906 }
13907
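/* A firmware image at @offset is considered valid when its first word
 * carries the 0x0c000000 signature in the upper bits and the second
 * word is zero.
 */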
13908 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13909 {
13910         u32 val;
13911
13912         if (tg3_nvram_read(tp, offset, &val) ||
13913             (val & 0xfc000000) != 0x0c000000 ||
13914             tg3_nvram_read(tp, offset + 4, &val) ||
13915             val != 0)
13916                 return 0;
13917
13918         return 1;
13919 }
13920
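/* Read the bootcode version out of NVRAM and append it to tp->fw_ver.
 * Newer images carry a 16-byte version string located through the
 * pointer at offset + 8; older images pack major/minor fields into
 * the TG3_NVM_PTREV_BCVER word.
 */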
13921 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13922 {
13923         u32 val, offset, start, ver_offset;
13924         int i, dst_off;
13925         bool newver = false;
13926
13927         if (tg3_nvram_read(tp, 0xc, &offset) ||
13928             tg3_nvram_read(tp, 0x4, &start))
13929                 return;
13930
13931         offset = tg3_nvram_logical_addr(tp, offset);
13932
13933         if (tg3_nvram_read(tp, offset, &val))
13934                 return;
13935
13936         if ((val & 0xfc000000) == 0x0c000000) {
13937                 if (tg3_nvram_read(tp, offset + 4, &val))
13938                         return;
13939
13940                 if (val == 0)
13941                         newver = true;
13942         }
13943
13944         dst_off = strlen(tp->fw_ver);
13945
13946         if (newver) {
13947                 if (TG3_VER_SIZE - dst_off < 16 ||
13948                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13949                         return;
13950
13951                 offset = offset + ver_offset - start;
13952                 for (i = 0; i < 16; i += 4) {
13953                         __be32 v;
13954                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13955                                 return;
13956
13957                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13958                 }
13959         } else {
13960                 u32 major, minor;
13961
13962                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13963                         return;
13964
13965                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13966                         TG3_NVM_BCVER_MAJSFT;
13967                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13968                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13969                          "v%d.%02d", major, minor);
13970         }
13971 }
13972
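/* Hardware self-boot images keep their major/minor revision in the
 * TG3_NVM_HWSB_CFG1 word; report it as "sb vM.mm".
 */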
13973 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13974 {
13975         u32 val, major, minor;
13976
13977         /* Use native endian representation */
13978         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13979                 return;
13980
13981         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13982                 TG3_NVM_HWSB_CFG1_MAJSFT;
13983         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13984                 TG3_NVM_HWSB_CFG1_MINSFT;
13985
        snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13987 }
13988
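/* Self-boot EEPROM images encode major/minor/build numbers in an
 * extended directory header whose location depends on the format
 * revision.  A non-zero build number is reported as an 'a', 'b', ...
 * suffix.
 */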
13989 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13990 {
13991         u32 offset, major, minor, build;
13992
13993         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13994
13995         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13996                 return;
13997
13998         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13999         case TG3_EEPROM_SB_REVISION_0:
14000                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14001                 break;
14002         case TG3_EEPROM_SB_REVISION_2:
14003                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14004                 break;
14005         case TG3_EEPROM_SB_REVISION_3:
14006                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14007                 break;
14008         case TG3_EEPROM_SB_REVISION_4:
14009                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14010                 break;
14011         case TG3_EEPROM_SB_REVISION_5:
14012                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14013                 break;
14014         case TG3_EEPROM_SB_REVISION_6:
14015                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14016                 break;
14017         default:
14018                 return;
14019         }
14020
14021         if (tg3_nvram_read(tp, offset, &val))
14022                 return;
14023
14024         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14025                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14026         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14027                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14028         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14029
14030         if (minor > 99 || build > 26)
14031                 return;
14032
14033         offset = strlen(tp->fw_ver);
14034         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14035                  " v%d.%02d", major, minor);
14036
14037         if (build > 0) {
14038                 offset = strlen(tp->fw_ver);
14039                 if (offset < TG3_VER_SIZE - 1)
14040                         tp->fw_ver[offset] = 'a' + build - 1;
14041         }
14042 }
14043
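/* Walk the NVRAM directory for the ASF init entry and, if it points
 * at a valid management firmware image, append that image's four-word
 * version string to tp->fw_ver.
 */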
14044 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
14045 {
14046         u32 val, offset, start;
14047         int i, vlen;
14048
14049         for (offset = TG3_NVM_DIR_START;
14050              offset < TG3_NVM_DIR_END;
14051              offset += TG3_NVM_DIRENT_SIZE) {
14052                 if (tg3_nvram_read(tp, offset, &val))
14053                         return;
14054
14055                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14056                         break;
14057         }
14058
14059         if (offset == TG3_NVM_DIR_END)
14060                 return;
14061
14062         if (!tg3_flag(tp, 5705_PLUS))
14063                 start = 0x08000000;
14064         else if (tg3_nvram_read(tp, offset - 4, &start))
14065                 return;
14066
14067         if (tg3_nvram_read(tp, offset + 4, &offset) ||
14068             !tg3_fw_img_is_valid(tp, offset) ||
14069             tg3_nvram_read(tp, offset + 8, &val))
14070                 return;
14071
14072         offset += val - start;
14073
        vlen = strlen(tp->fw_ver);

        /* Leave room for the ", " separator plus at least one byte of
         * version data.
         */
        if (vlen >= TG3_VER_SIZE - 2)
                return;

        tp->fw_ver[vlen++] = ',';
        tp->fw_ver[vlen++] = ' ';
14078
14079         for (i = 0; i < 4; i++) {
14080                 __be32 v;
14081                 if (tg3_nvram_read_be32(tp, offset, &v))
14082                         return;
14083
14084                 offset += sizeof(v);
14085
14086                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14087                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14088                         break;
14089                 }
14090
14091                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14092                 vlen += sizeof(v);
14093         }
14094 }
14095
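/* NCSI support is advertised through the APE firmware feature word,
 * which is only meaningful once the APE segment signature matches and
 * the firmware reports ready status.
 */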
14096 static void __devinit tg3_probe_ncsi(struct tg3 *tp)
14097 {
14098         u32 apedata;
14099
14100         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14101         if (apedata != APE_SEG_SIG_MAGIC)
14102                 return;
14103
14104         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14105         if (!(apedata & APE_FW_STATUS_READY))
14106                 return;
14107
14108         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14109                 tg3_flag_set(tp, APE_HAS_NCSI);
14110 }
14111
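/* Append the APE firmware version, labelled NCSI or DASH depending on
 * what tg3_probe_ncsi() detected.
 */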
14112 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14113 {
14114         int vlen;
14115         u32 apedata;
14116         char *fwtype;
14117
14118         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14119
14120         if (tg3_flag(tp, APE_HAS_NCSI))
14121                 fwtype = "NCSI";
14122         else
14123                 fwtype = "DASH";
14124
14125         vlen = strlen(tp->fw_ver);
14126
14127         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14128                  fwtype,
14129                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14130                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14131                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14132                  (apedata & APE_FW_VERSION_BLDMSK));
14133 }
14134
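/* Assemble tp->fw_ver from, in order: any version already read from
 * VPD, the bootcode or self-boot version in NVRAM, and the ASF or APE
 * management firmware version.
 */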
14135 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
14136 {
14137         u32 val;
14138         bool vpd_vers = false;
14139
14140         if (tp->fw_ver[0] != 0)
14141                 vpd_vers = true;
14142
14143         if (tg3_flag(tp, NO_NVRAM)) {
                strncat(tp->fw_ver, "sb",
                        TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14145                 return;
14146         }
14147
14148         if (tg3_nvram_read(tp, 0, &val))
14149                 return;
14150
14151         if (val == TG3_EEPROM_MAGIC)
14152                 tg3_read_bc_ver(tp);
14153         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14154                 tg3_read_sb_ver(tp, val);
14155         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14156                 tg3_read_hwsb_ver(tp);
14157
14158         if (tg3_flag(tp, ENABLE_ASF)) {
14159                 if (tg3_flag(tp, ENABLE_APE)) {
14160                         tg3_probe_ncsi(tp);
14161                         if (!vpd_vers)
14162                                 tg3_read_dash_ver(tp);
14163                 } else if (!vpd_vers) {
14164                         tg3_read_mgmtfw_ver(tp);
14165                 }
14166         }
14167
14168         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14169 }
14170
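/* The RX return ring size is chip dependent: 5717-class parts support
 * the large production ring, jumbo-capable non-5780 parts use the
 * 5700 size, and everything else takes the 5705 size.
 */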
14171 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14172 {
14173         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14174                 return TG3_RX_RET_MAX_SIZE_5717;
14175         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14176                 return TG3_RX_RET_MAX_SIZE_5700;
14177         else
14178                 return TG3_RX_RET_MAX_SIZE_5705;
14179 }
14180
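/* Host bridges known to reorder posted writes to the mailbox
 * registers; see the MBOX_WRITE_REORDER handling below.
 */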
14181 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14182         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14183         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14184         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14185         { },
14186 };
14187
14188 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14189 {
14190         struct pci_dev *peer;
14191         unsigned int func, devnr = tp->pdev->devfn & ~7;
14192
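        /* Scan the other functions of this PCI slot looking for the
         * mate of a dual-port device.
         */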
14193         for (func = 0; func < 8; func++) {
14194                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14195                 if (peer && peer != tp->pdev)
14196                         break;
14197                 pci_dev_put(peer);
14198         }
14199         /* 5704 can be configured in single-port mode, set peer to
14200          * tp->pdev in that case.
14201          */
14202         if (!peer) {
14203                 peer = tp->pdev;
14204                 return peer;
14205         }
14206
14207         /*
14208          * We don't need to keep the refcount elevated; there's no way
         * to remove one half of this device without removing the other.
14210          */
14211         pci_dev_put(peer);
14212
14213         return peer;
14214 }
14215
14216 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14217 {
14218         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14219         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14220                 u32 reg;
14221
14222                 /* All devices that use the alternate
14223                  * ASIC REV location have a CPMU.
14224                  */
14225                 tg3_flag_set(tp, CPMU_PRESENT);
14226
14227                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14228                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14229                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14230                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14231                         reg = TG3PCI_GEN2_PRODID_ASICREV;
14232                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14233                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14234                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14235                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14236                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14237                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14238                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14239                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14240                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14241                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14242                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14243                 else
14244                         reg = TG3PCI_PRODID_ASICREV;
14245
14246                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14247         }
14248
14249         /* Wrong chip ID in 5752 A0. This code can be removed later
14250          * as A0 is not in production.
14251          */
14252         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14253                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14254
14255         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14256             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14257             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14258                 tg3_flag_set(tp, 5717_PLUS);
14259
14260         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14261             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14262                 tg3_flag_set(tp, 57765_CLASS);
14263
14264         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14265                 tg3_flag_set(tp, 57765_PLUS);
14266
14267         /* Intentionally exclude ASIC_REV_5906 */
14268         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14269             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14270             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14271             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14272             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14273             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14274             tg3_flag(tp, 57765_PLUS))
14275                 tg3_flag_set(tp, 5755_PLUS);
14276
14277         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14278             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14279                 tg3_flag_set(tp, 5780_CLASS);
14280
14281         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14282             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14283             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14284             tg3_flag(tp, 5755_PLUS) ||
14285             tg3_flag(tp, 5780_CLASS))
14286                 tg3_flag_set(tp, 5750_PLUS);
14287
14288         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14289             tg3_flag(tp, 5750_PLUS))
14290                 tg3_flag_set(tp, 5705_PLUS);
14291 }
14292
14293 static int __devinit tg3_get_invariants(struct tg3 *tp)
14294 {
14295         u32 misc_ctrl_reg;
14296         u32 pci_state_reg, grc_misc_cfg;
14297         u32 val;
14298         u16 pci_cmd;
14299         int err;
14300
14301         /* Force memory write invalidate off.  If we leave it on,
14302          * then on 5700_BX chips we have to enable a workaround.
14303          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
         * to match the cacheline size.  The Broadcom driver has this
         * workaround but turns MWI off all the time and so never uses
14306          * it.  This seems to suggest that the workaround is insufficient.
14307          */
14308         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14309         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14310         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14311
14312         /* Important! -- Make sure register accesses are byteswapped
14313          * correctly.  Also, for those chips that require it, make
14314          * sure that indirect register accesses are enabled before
14315          * the first operation.
14316          */
14317         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14318                               &misc_ctrl_reg);
14319         tp->misc_host_ctrl |= (misc_ctrl_reg &
14320                                MISC_HOST_CTRL_CHIPREV);
14321         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14322                                tp->misc_host_ctrl);
14323
14324         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14325
14326         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14327          * we need to disable memory and use config. cycles
14328          * only to access all registers. The 5702/03 chips
14329          * can mistakenly decode the special cycles from the
14330          * ICH chipsets as memory write cycles, causing corruption
14331          * of register and memory space. Only certain ICH bridges
14332          * will drive special cycles with non-zero data during the
14333          * address phase which can fall within the 5703's address
14334          * range. This is not an ICH bug as the PCI spec allows
14335          * non-zero address during special cycles. However, only
14336          * these ICH bridges are known to drive non-zero addresses
14337          * during special cycles.
14338          *
14339          * Since special cycles do not cross PCI bridges, we only
14340          * enable this workaround if the 5703 is on the secondary
14341          * bus of these ICH bridges.
14342          */
14343         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14344             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14345                 static struct tg3_dev_id {
14346                         u32     vendor;
14347                         u32     device;
14348                         u32     rev;
14349                 } ich_chipsets[] = {
14350                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14351                           PCI_ANY_ID },
14352                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14353                           PCI_ANY_ID },
14354                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14355                           0xa },
14356                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14357                           PCI_ANY_ID },
14358                         { },
14359                 };
14360                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14361                 struct pci_dev *bridge = NULL;
14362
14363                 while (pci_id->vendor != 0) {
14364                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14365                                                 bridge);
14366                         if (!bridge) {
14367                                 pci_id++;
14368                                 continue;
14369                         }
14370                         if (pci_id->rev != PCI_ANY_ID) {
14371                                 if (bridge->revision > pci_id->rev)
14372                                         continue;
14373                         }
14374                         if (bridge->subordinate &&
14375                             (bridge->subordinate->number ==
14376                              tp->pdev->bus->number)) {
14377                                 tg3_flag_set(tp, ICH_WORKAROUND);
14378                                 pci_dev_put(bridge);
14379                                 break;
14380                         }
14381                 }
14382         }
14383
14384         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14385                 static struct tg3_dev_id {
14386                         u32     vendor;
14387                         u32     device;
14388                 } bridge_chipsets[] = {
14389                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14390                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14391                         { },
14392                 };
14393                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14394                 struct pci_dev *bridge = NULL;
14395
14396                 while (pci_id->vendor != 0) {
14397                         bridge = pci_get_device(pci_id->vendor,
14398                                                 pci_id->device,
14399                                                 bridge);
14400                         if (!bridge) {
14401                                 pci_id++;
14402                                 continue;
14403                         }
14404                         if (bridge->subordinate &&
14405                             (bridge->subordinate->number <=
14406                              tp->pdev->bus->number) &&
14407                             (bridge->subordinate->busn_res.end >=
14408                              tp->pdev->bus->number)) {
14409                                 tg3_flag_set(tp, 5701_DMA_BUG);
14410                                 pci_dev_put(bridge);
14411                                 break;
14412                         }
14413                 }
14414         }
14415
14416         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14417          * DMA addresses > 40-bit. This bridge may have other additional
14418          * 57xx devices behind it in some 4-port NIC designs for example.
14419          * Any tg3 device found behind the bridge will also need the 40-bit
14420          * DMA workaround.
14421          */
14422         if (tg3_flag(tp, 5780_CLASS)) {
14423                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14424                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14425         } else {
14426                 struct pci_dev *bridge = NULL;
14427
14428                 do {
14429                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14430                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14431                                                 bridge);
14432                         if (bridge && bridge->subordinate &&
14433                             (bridge->subordinate->number <=
14434                              tp->pdev->bus->number) &&
14435                             (bridge->subordinate->busn_res.end >=
14436                              tp->pdev->bus->number)) {
14437                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14438                                 pci_dev_put(bridge);
14439                                 break;
14440                         }
14441                 } while (bridge);
14442         }
14443
14444         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14445             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14446                 tp->pdev_peer = tg3_find_peer(tp);
14447
14448         /* Determine TSO capabilities */
14449         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14450                 ; /* Do nothing. HW bug. */
14451         else if (tg3_flag(tp, 57765_PLUS))
14452                 tg3_flag_set(tp, HW_TSO_3);
14453         else if (tg3_flag(tp, 5755_PLUS) ||
14454                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14455                 tg3_flag_set(tp, HW_TSO_2);
14456         else if (tg3_flag(tp, 5750_PLUS)) {
14457                 tg3_flag_set(tp, HW_TSO_1);
14458                 tg3_flag_set(tp, TSO_BUG);
14459                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14460                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14461                         tg3_flag_clear(tp, TSO_BUG);
14462         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14463                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14464                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
                tg3_flag_set(tp, TSO_BUG);
14466                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14467                         tp->fw_needed = FIRMWARE_TG3TSO5;
14468                 else
14469                         tp->fw_needed = FIRMWARE_TG3TSO;
14470         }
14471
14472         /* Selectively allow TSO based on operating conditions */
14473         if (tg3_flag(tp, HW_TSO_1) ||
14474             tg3_flag(tp, HW_TSO_2) ||
14475             tg3_flag(tp, HW_TSO_3) ||
14476             tp->fw_needed) {
14477                 /* For firmware TSO, assume ASF is disabled.
14478                  * We'll disable TSO later if we discover ASF
14479                  * is enabled in tg3_get_eeprom_hw_cfg().
14480                  */
14481                 tg3_flag_set(tp, TSO_CAPABLE);
14482         } else {
14483                 tg3_flag_clear(tp, TSO_CAPABLE);
14484                 tg3_flag_clear(tp, TSO_BUG);
14485                 tp->fw_needed = NULL;
14486         }
14487
14488         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14489                 tp->fw_needed = FIRMWARE_TG3;
14490
14491         tp->irq_max = 1;
14492
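        /* MSI is available on all 5750+ parts, but 5750 AX/BX
         * revisions and single-port 5714 A2 and older have MSI errata
         * that force a fallback to INTx.  57765+ parts additionally
         * support MSI-X.
         */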
14493         if (tg3_flag(tp, 5750_PLUS)) {
14494                 tg3_flag_set(tp, SUPPORT_MSI);
14495                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14496                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14497                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14498                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14499                      tp->pdev_peer == tp->pdev))
14500                         tg3_flag_clear(tp, SUPPORT_MSI);
14501
14502                 if (tg3_flag(tp, 5755_PLUS) ||
14503                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14504                         tg3_flag_set(tp, 1SHOT_MSI);
14505                 }
14506
14507                 if (tg3_flag(tp, 57765_PLUS)) {
14508                         tg3_flag_set(tp, SUPPORT_MSIX);
14509                         tp->irq_max = TG3_IRQ_MAX_VECS;
14510                         tg3_rss_init_dflt_indir_tbl(tp);
14511                 }
14512         }
14513
14514         if (tg3_flag(tp, 5755_PLUS) ||
14515             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14516                 tg3_flag_set(tp, SHORT_DMA_BUG);
14517
14518         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14519                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14520
14521         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14522             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14523             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14524                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14525
14526         if (tg3_flag(tp, 57765_PLUS) &&
14527             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14528                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14529
14530         if (!tg3_flag(tp, 5705_PLUS) ||
14531             tg3_flag(tp, 5780_CLASS) ||
14532             tg3_flag(tp, USE_JUMBO_BDFLAG))
14533                 tg3_flag_set(tp, JUMBO_CAPABLE);
14534
14535         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14536                               &pci_state_reg);
14537
14538         if (pci_is_pcie(tp->pdev)) {
14539                 u16 lnkctl;
14540
14541                 tg3_flag_set(tp, PCI_EXPRESS);
14542
14543                 pci_read_config_word(tp->pdev,
14544                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14545                                      &lnkctl);
14546                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14547                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14548                             ASIC_REV_5906) {
14549                                 tg3_flag_clear(tp, HW_TSO_2);
14550                                 tg3_flag_clear(tp, TSO_CAPABLE);
14551                         }
14552                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14553                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14554                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14555                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14556                                 tg3_flag_set(tp, CLKREQ_BUG);
14557                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14558                         tg3_flag_set(tp, L1PLLPD_EN);
14559                 }
14560         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14561                 /* BCM5785 devices are effectively PCIe devices, and should
14562                  * follow PCIe codepaths, but do not have a PCIe capabilities
14563                  * section.
14564                  */
14565                 tg3_flag_set(tp, PCI_EXPRESS);
14566         } else if (!tg3_flag(tp, 5705_PLUS) ||
14567                    tg3_flag(tp, 5780_CLASS)) {
14568                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14569                 if (!tp->pcix_cap) {
14570                         dev_err(&tp->pdev->dev,
14571                                 "Cannot find PCI-X capability, aborting\n");
14572                         return -EIO;
14573                 }
14574
14575                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14576                         tg3_flag_set(tp, PCIX_MODE);
14577         }
14578
14579         /* If we have an AMD 762 or VIA K8T800 chipset, write
14580          * reordering to the mailbox registers done by the host
14581          * controller can cause major troubles.  We read back from
14582          * every mailbox register write to force the writes to be
14583          * posted to the chip in order.
14584          */
14585         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14586             !tg3_flag(tp, PCI_EXPRESS))
14587                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14588
14589         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14590                              &tp->pci_cacheline_sz);
14591         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14592                              &tp->pci_lat_timer);
14593         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14594             tp->pci_lat_timer < 64) {
14595                 tp->pci_lat_timer = 64;
14596                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14597                                       tp->pci_lat_timer);
14598         }
14599
14600         /* Important! -- It is critical that the PCI-X hw workaround
14601          * situation is decided before the first MMIO register access.
14602          */
14603         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14604                 /* 5700 BX chips need to have their TX producer index
14605                  * mailboxes written twice to workaround a bug.
14606                  */
14607                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14608
14609                 /* If we are in PCI-X mode, enable register write workaround.
14610                  *
14611                  * The workaround is to use indirect register accesses
14612                  * for all chip writes not to mailbox registers.
14613                  */
14614                 if (tg3_flag(tp, PCIX_MODE)) {
14615                         u32 pm_reg;
14616
14617                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14618
                        /* The chip can have its power management PCI config
14620                          * space registers clobbered due to this bug.
14621                          * So explicitly force the chip into D0 here.
14622                          */
14623                         pci_read_config_dword(tp->pdev,
14624                                               tp->pm_cap + PCI_PM_CTRL,
14625                                               &pm_reg);
14626                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14627                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14628                         pci_write_config_dword(tp->pdev,
14629                                                tp->pm_cap + PCI_PM_CTRL,
14630                                                pm_reg);
14631
14632                         /* Also, force SERR#/PERR# in PCI command. */
14633                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14634                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14635                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14636                 }
14637         }
14638
14639         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14640                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14641         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14642                 tg3_flag_set(tp, PCI_32BIT);
14643
14644         /* Chip-specific fixup from Broadcom driver */
14645         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14646             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14647                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14648                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14649         }
14650
14651         /* Default fast path register access methods */
14652         tp->read32 = tg3_read32;
14653         tp->write32 = tg3_write32;
14654         tp->read32_mbox = tg3_read32;
14655         tp->write32_mbox = tg3_write32;
14656         tp->write32_tx_mbox = tg3_write32;
14657         tp->write32_rx_mbox = tg3_write32;
14658
14659         /* Various workaround register access methods */
14660         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14661                 tp->write32 = tg3_write_indirect_reg32;
14662         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14663                  (tg3_flag(tp, PCI_EXPRESS) &&
14664                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14665                 /*
14666                  * Back to back register writes can cause problems on these
14667                  * chips, the workaround is to read back all reg writes
14668                  * except those to mailbox regs.
14669                  *
14670                  * See tg3_write_indirect_reg32().
14671                  */
14672                 tp->write32 = tg3_write_flush_reg32;
14673         }
14674
14675         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14676                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14677                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14678                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14679         }
14680
14681         if (tg3_flag(tp, ICH_WORKAROUND)) {
14682                 tp->read32 = tg3_read_indirect_reg32;
14683                 tp->write32 = tg3_write_indirect_reg32;
14684                 tp->read32_mbox = tg3_read_indirect_mbox;
14685                 tp->write32_mbox = tg3_write_indirect_mbox;
14686                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14687                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14688
14689                 iounmap(tp->regs);
14690                 tp->regs = NULL;
14691
14692                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14693                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14694                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14695         }
14696         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14697                 tp->read32_mbox = tg3_read32_mbox_5906;
14698                 tp->write32_mbox = tg3_write32_mbox_5906;
14699                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14700                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14701         }
14702
14703         if (tp->write32 == tg3_write_indirect_reg32 ||
14704             (tg3_flag(tp, PCIX_MODE) &&
14705              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14706               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14707                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14708
14709         /* The memory arbiter has to be enabled in order for SRAM accesses
14710          * to succeed.  Normally on powerup the tg3 chip firmware will make
14711          * sure it is enabled, but other entities such as system netboot
14712          * code might disable it.
14713          */
14714         val = tr32(MEMARB_MODE);
14715         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14716
14717         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14718         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14719             tg3_flag(tp, 5780_CLASS)) {
14720                 if (tg3_flag(tp, PCIX_MODE)) {
14721                         pci_read_config_dword(tp->pdev,
14722                                               tp->pcix_cap + PCI_X_STATUS,
14723                                               &val);
14724                         tp->pci_fn = val & 0x7;
14725                 }
14726         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14727                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14728                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14729                     NIC_SRAM_CPMUSTAT_SIG) {
14730                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14731                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14732                 }
14733         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14734                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14735                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14736                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14737                     NIC_SRAM_CPMUSTAT_SIG) {
14738                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14739                                      TG3_CPMU_STATUS_FSHFT_5719;
14740                 }
14741         }
14742
14743         /* Get eeprom hw config before calling tg3_set_power_state().
14744          * In particular, the TG3_FLAG_IS_NIC flag must be
14745          * determined before calling tg3_set_power_state() so that
14746          * we know whether or not to switch out of Vaux power.
14747          * When the flag is set, it means that GPIO1 is used for eeprom
14748          * write protect and also implies that it is a LOM where GPIOs
14749          * are not used to switch power.
14750          */
14751         tg3_get_eeprom_hw_cfg(tp);
14752
14753         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14754                 tg3_flag_clear(tp, TSO_CAPABLE);
14755                 tg3_flag_clear(tp, TSO_BUG);
14756                 tp->fw_needed = NULL;
14757         }
14758
14759         if (tg3_flag(tp, ENABLE_APE)) {
14760                 /* Allow reads and writes to the
14761                  * APE register and memory space.
14762                  */
14763                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14764                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14765                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14766                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14767                                        pci_state_reg);
14768
14769                 tg3_ape_lock_init(tp);
14770         }
14771
14772         /* Set up tp->grc_local_ctrl before calling
14773          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14774          * will bring 5700's external PHY out of reset.
14775          * It is also used as eeprom write protect on LOMs.
14776          */
14777         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14778         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14779             tg3_flag(tp, EEPROM_WRITE_PROT))
14780                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14781                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14782         /* Unused GPIO3 must be driven as output on 5752 because there
14783          * are no pull-up resistors on unused GPIO pins.
14784          */
14785         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14786                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14787
14788         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14789             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14790             tg3_flag(tp, 57765_CLASS))
14791                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14792
14793         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14794             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14795                 /* Turn off the debug UART. */
14796                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14797                 if (tg3_flag(tp, IS_NIC))
14798                         /* Keep VMain power. */
14799                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14800                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14801         }
14802
14803         /* Switch out of Vaux if it is a NIC */
14804         tg3_pwrsrc_switch_to_vmain(tp);
14805
14806         /* Derive initial jumbo mode from MTU assigned in
14807          * ether_setup() via the alloc_etherdev() call
14808          */
14809         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14810                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14811
14812         /* Determine WakeOnLan speed to use. */
14813         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14814             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14815             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14816             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14817                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14818         } else {
14819                 tg3_flag_set(tp, WOL_SPEED_100MB);
14820         }
14821
14822         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14823                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14824
14825         /* A few boards don't want Ethernet@WireSpeed phy feature */
14826         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14827             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14828              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14829              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14830             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14831             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14832                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14833
14834         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14835             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14836                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14837         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14838                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14839
14840         if (tg3_flag(tp, 5705_PLUS) &&
14841             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14842             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14843             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14844             !tg3_flag(tp, 57765_PLUS)) {
14845                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14846                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14847                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14848                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14849                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14850                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14851                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14852                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14853                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
                } else {
                        tp->phy_flags |= TG3_PHYFLG_BER_BUG;
                }
14856         }
14857
14858         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14859             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14860                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14861                 if (tp->phy_otp == 0)
14862                         tp->phy_otp = TG3_OTP_DEFAULT;
14863         }
14864
14865         if (tg3_flag(tp, CPMU_PRESENT))
14866                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14867         else
14868                 tp->mi_mode = MAC_MI_MODE_BASE;
14869
14870         tp->coalesce_mode = 0;
14871         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14872             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14873                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14874
14875         /* Set these bits to enable statistics workaround. */
14876         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14877             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14878             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14879                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14880                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14881         }
14882
14883         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14884             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14885                 tg3_flag_set(tp, USE_PHYLIB);
14886
14887         err = tg3_mdio_init(tp);
14888         if (err)
14889                 return err;
14890
14891         /* Initialize data/descriptor byte/word swapping. */
14892         val = tr32(GRC_MODE);
14893         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14894                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14895                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14896                         GRC_MODE_B2HRX_ENABLE |
14897                         GRC_MODE_HTX2B_ENABLE |
14898                         GRC_MODE_HOST_STACKUP);
14899         else
14900                 val &= GRC_MODE_HOST_STACKUP;
14901
14902         tw32(GRC_MODE, val | tp->grc_mode);
14903
14904         tg3_switch_clocks(tp);
14905
14906         /* Clear this out for sanity. */
14907         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14908
14909         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14910                               &pci_state_reg);
14911         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14912             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14913                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14914
14915                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14916                     chiprevid == CHIPREV_ID_5701_B0 ||
14917                     chiprevid == CHIPREV_ID_5701_B2 ||
14918                     chiprevid == CHIPREV_ID_5701_B5) {
14919                         void __iomem *sram_base;
14920
14921                         /* Write some dummy words into the SRAM status block
14922                          * area, see if it reads back correctly.  If the return
14923                          * value is bad, force enable the PCIX workaround.
14924                          */
14925                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14926
14927                         writel(0x00000000, sram_base);
14928                         writel(0x00000000, sram_base + 4);
14929                         writel(0xffffffff, sram_base + 4);
14930                         if (readl(sram_base) != 0x00000000)
14931                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14932                 }
14933         }
14934
14935         udelay(50);
14936         tg3_nvram_init(tp);
14937
14938         grc_misc_cfg = tr32(GRC_MISC_CFG);
14939         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14940
14941         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14942             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14943              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14944                 tg3_flag_set(tp, IS_5788);
14945
14946         if (!tg3_flag(tp, IS_5788) &&
14947             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14948                 tg3_flag_set(tp, TAGGED_STATUS);
14949         if (tg3_flag(tp, TAGGED_STATUS)) {
14950                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14951                                       HOSTCC_MODE_CLRTICK_TXBD);
14952
14953                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14954                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14955                                        tp->misc_host_ctrl);
14956         }
14957
14958         /* Preserve the APE MAC_MODE bits */
14959         if (tg3_flag(tp, ENABLE_APE))
14960                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14961         else
14962                 tp->mac_mode = 0;
14963
14964         /* these are limited to 10/100 only */
14965         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14966              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14967             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14968              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14969              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14970               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14971               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14972             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14973              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14974               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14975               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14976             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14977             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14978             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14979             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14980                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14981
14982         err = tg3_phy_probe(tp);
14983         if (err) {
14984                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14985                 /* ... but do not return immediately ... */
14986                 tg3_mdio_fini(tp);
14987         }
14988
14989         tg3_read_vpd(tp);
14990         tg3_read_fw_ver(tp);
14991
14992         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14993                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14994         } else {
14995                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14996                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14997                 else
14998                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14999         }
15000
15001         /* 5700 {AX,BX} chips have a broken status block link
15002          * change bit implementation, so we must use the
15003          * status register in those cases.
15004          */
15005         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15006                 tg3_flag_set(tp, USE_LINKCHG_REG);
15007         else
15008                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15009
15010         /* The led_ctrl is set during tg3_phy_probe, here we might
15011          * have to force the link status polling mechanism based
15012          * upon subsystem IDs.
15013          */
15014         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15015             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15016             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15017                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15018                 tg3_flag_set(tp, USE_LINKCHG_REG);
15019         }
15020
15021         /* For all SERDES we poll the MAC status register. */
15022         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15023                 tg3_flag_set(tp, POLL_SERDES);
15024         else
15025                 tg3_flag_clear(tp, POLL_SERDES);
15026
15027         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15028         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15030             tg3_flag(tp, PCIX_MODE)) {
15031                 tp->rx_offset = NET_SKB_PAD;
15032 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15033                 tp->rx_copy_thresh = ~(u16)0;
15034 #endif
15035         }
15036
15037         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15038         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15039         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15040
15041         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15042
15043         /* Increment the rx prod index on the rx std ring by at most
15044          * 8 for these chips to workaround hw errata.
15045          */
15046         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15047             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15048             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15049                 tp->rx_std_max_post = 8;
15050
15051         if (tg3_flag(tp, ASPM_WORKAROUND))
15052                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15053                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15054
15055         return err;
15056 }
15057
15058 #ifdef CONFIG_SPARC
15059 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15060 {
15061         struct net_device *dev = tp->dev;
15062         struct pci_dev *pdev = tp->pdev;
15063         struct device_node *dp = pci_device_to_OF_node(pdev);
15064         const unsigned char *addr;
15065         int len;
15066
15067         addr = of_get_property(dp, "local-mac-address", &len);
15068         if (addr && len == 6) {
15069                 memcpy(dev->dev_addr, addr, 6);
15070                 memcpy(dev->perm_addr, dev->dev_addr, 6);
15071                 return 0;
15072         }
15073         return -ENODEV;
15074 }
15075
15076 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15077 {
15078         struct net_device *dev = tp->dev;
15079
15080         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15081         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15082         return 0;
15083 }
15084 #endif
15085
15086 static int __devinit tg3_get_device_address(struct tg3 *tp)
15087 {
15088         struct net_device *dev = tp->dev;
15089         u32 hi, lo, mac_offset;
15090         int addr_ok = 0;
15091
15092 #ifdef CONFIG_SPARC
15093         if (!tg3_get_macaddr_sparc(tp))
15094                 return 0;
15095 #endif
15096
15097         mac_offset = 0x7c;
15098         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15099             tg3_flag(tp, 5780_CLASS)) {
15100                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15101                         mac_offset = 0xcc;
15102                 if (tg3_nvram_lock(tp))
15103                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15104                 else
15105                         tg3_nvram_unlock(tp);
15106         } else if (tg3_flag(tp, 5717_PLUS)) {
15107                 if (tp->pci_fn & 1)
15108                         mac_offset = 0xcc;
15109                 if (tp->pci_fn > 1)
15110                         mac_offset += 0x18c;
15111         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15112                 mac_offset = 0x10;
15113
15114         /* First try to get it from MAC address mailbox. */
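              /* A valid entry has 0x484b (ASCII "HK", presumably a bootcode
               * signature) in the upper 16 bits of the high word; the low
               * two bytes of hi and all four bytes of lo then carry the
               * station address, most significant octet first.
               */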
15115         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15116         if ((hi >> 16) == 0x484b) {
15117                 dev->dev_addr[0] = (hi >>  8) & 0xff;
15118                 dev->dev_addr[1] = (hi >>  0) & 0xff;
15119
15120                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15121                 dev->dev_addr[2] = (lo >> 24) & 0xff;
15122                 dev->dev_addr[3] = (lo >> 16) & 0xff;
15123                 dev->dev_addr[4] = (lo >>  8) & 0xff;
15124                 dev->dev_addr[5] = (lo >>  0) & 0xff;
15125
15126                 /* Some old bootcode may report a 0 MAC address in SRAM */
15127                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15128         }
15129         if (!addr_ok) {
15130                 /* Next, try NVRAM. */
15131                 if (!tg3_flag(tp, NO_NVRAM) &&
15132                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15133                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
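                              /* The be32 reads preserve NVRAM byte order:
                               * the MAC starts in the low two bytes of hi
                               * and continues through all four bytes of lo.
                               */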
15134                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15135                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15136                 }
15137                 /* Finally just fetch it out of the MAC control regs. */
15138                 else {
15139                         hi = tr32(MAC_ADDR_0_HIGH);
15140                         lo = tr32(MAC_ADDR_0_LOW);
15141
15142                         dev->dev_addr[5] = lo & 0xff;
15143                         dev->dev_addr[4] = (lo >> 8) & 0xff;
15144                         dev->dev_addr[3] = (lo >> 16) & 0xff;
15145                         dev->dev_addr[2] = (lo >> 24) & 0xff;
15146                         dev->dev_addr[1] = hi & 0xff;
15147                         dev->dev_addr[0] = (hi >> 8) & 0xff;
15148                 }
15149         }
15150
15151         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15152 #ifdef CONFIG_SPARC
15153                 if (!tg3_get_default_macaddr_sparc(tp))
15154                         return 0;
15155 #endif
15156                 return -EINVAL;
15157         }
15158         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15159         return 0;
15160 }
15161
15162 #define BOUNDARY_SINGLE_CACHELINE       1
15163 #define BOUNDARY_MULTI_CACHELINE        2
15164
15165 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15166 {
15167         int cacheline_size;
15168         u8 byte;
15169         int goal;
15170
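              /* PCI_CACHE_LINE_SIZE is in units of 32-bit words, so
               * multiply by 4 for bytes.  Zero means firmware never
               * programmed it; assume a large 1024-byte line.
               */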
15171         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15172         if (byte == 0)
15173                 cacheline_size = 1024;
15174         else
15175                 cacheline_size = (int) byte * 4;
15176
15177         /* On 5703 and later chips, the boundary bits have no
15178          * effect.
15179          */
15180         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15181             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15182             !tg3_flag(tp, PCI_EXPRESS))
15183                 goto out;
15184
15185 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15186         goal = BOUNDARY_MULTI_CACHELINE;
15187 #else
15188 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15189         goal = BOUNDARY_SINGLE_CACHELINE;
15190 #else
15191         goal = 0;
15192 #endif
15193 #endif
15194
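              /* 57765+ parts expose only a single disable bit rather than
               * explicit boundary fields; set it when no alignment goal
               * applies.
               */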
15195         if (tg3_flag(tp, 57765_PLUS)) {
15196                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15197                 goto out;
15198         }
15199
15200         if (!goal)
15201                 goto out;
15202
15203         /* PCI controllers on most RISC systems tend to disconnect
15204          * when a device tries to burst across a cache-line boundary.
15205          * Therefore, letting tg3 do so just wastes PCI bandwidth.
15206          *
15207          * Unfortunately, for PCI-E there are only limited
15208          * write-side controls for this, and thus for reads
15209          * we will still get the disconnects.  We'll also waste
15210          * these PCI cycles for both read and write for chips
15211          * other than 5700 and 5701 which do not implement the
15212          * boundary bits.
15213          */
15214         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15215                 switch (cacheline_size) {
15216                 case 16:
15217                 case 32:
15218                 case 64:
15219                 case 128:
15220                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15221                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15222                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15223                         } else {
15224                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15225                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15226                         }
15227                         break;
15228
15229                 case 256:
15230                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15231                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15232                         break;
15233
15234                 default:
15235                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15236                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15237                         break;
15238                 }
15239         } else if (tg3_flag(tp, PCI_EXPRESS)) {
15240                 switch (cacheline_size) {
15241                 case 16:
15242                 case 32:
15243                 case 64:
15244                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15245                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15246                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15247                                 break;
15248                         }
15249                         /* fallthrough */
15250                 case 128:
15251                 default:
15252                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15253                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15254                         break;
15255                 }
15256         } else {
15257                 switch (cacheline_size) {
15258                 case 16:
15259                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15260                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15261                                         DMA_RWCTRL_WRITE_BNDRY_16);
15262                                 break;
15263                         }
15264                         /* fallthrough */
15265                 case 32:
15266                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15267                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15268                                         DMA_RWCTRL_WRITE_BNDRY_32);
15269                                 break;
15270                         }
15271                         /* fallthrough */
15272                 case 64:
15273                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15274                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15275                                         DMA_RWCTRL_WRITE_BNDRY_64);
15276                                 break;
15277                         }
15278                         /* fallthrough */
15279                 case 128:
15280                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15281                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15282                                         DMA_RWCTRL_WRITE_BNDRY_128);
15283                                 break;
15284                         }
15285                         /* fallthrough */
15286                 case 256:
15287                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15288                                 DMA_RWCTRL_WRITE_BNDRY_256);
15289                         break;
15290                 case 512:
15291                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15292                                 DMA_RWCTRL_WRITE_BNDRY_512);
15293                         break;
15294                 case 1024:
15295                 default:
15296                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15297                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15298                         break;
15299                 }
15300         }
15301
15302 out:
15303         return val;
15304 }
15305
15306 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15307 {
15308         struct tg3_internal_buffer_desc test_desc;
15309         u32 sram_dma_descs;
15310         int i, ret;
15311
15312         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15313
15314         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15315         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15316         tw32(RDMAC_STATUS, 0);
15317         tw32(WDMAC_STATUS, 0);
15318
15319         tw32(BUFMGR_MODE, 0);
15320         tw32(FTQ_RESET, 0);
15321
15322         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15323         test_desc.addr_lo = buf_dma & 0xffffffff;
15324         test_desc.nic_mbuf = 0x00002100;
15325         test_desc.len = size;
15326
15327         /*
15328          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15329          * the *second* time the tg3 driver was getting loaded after an
15330          * initial scan.
15331          *
15332          * Broadcom tells me:
15333          *   ...the DMA engine is connected to the GRC block and a DMA
15334          *   reset may affect the GRC block in some unpredictable way...
15335          *   The behavior of resets to individual blocks has not been tested.
15336          *
15337          * Broadcom noted the GRC reset will also reset all sub-components.
15338          */
15339         if (to_device) {
15340                 test_desc.cqid_sqid = (13 << 8) | 2;
15341
15342                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15343                 udelay(40);
15344         } else {
15345                 test_desc.cqid_sqid = (16 << 8) | 7;
15346
15347                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15348                 udelay(40);
15349         }
15350         test_desc.flags = 0x00000005;
15351
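              /* Copy the test descriptor into NIC SRAM one 32-bit word at
               * a time, using the indirect memory window in PCI config
               * space.
               */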
15352         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15353                 u32 val;
15354
15355                 val = *(((u32 *)&test_desc) + i);
15356                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15357                                        sram_dma_descs + (i * sizeof(u32)));
15358                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15359         }
15360         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15361
15362         if (to_device)
15363                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15364         else
15365                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15366
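              /* Poll the completion FIFO for up to 40 * 100us = 4ms for
               * the descriptor to come back, i.e. for the DMA to finish.
               */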
15367         ret = -ENODEV;
15368         for (i = 0; i < 40; i++) {
15369                 u32 val;
15370
15371                 if (to_device)
15372                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15373                 else
15374                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15375                 if ((val & 0xffff) == sram_dma_descs) {
15376                         ret = 0;
15377                         break;
15378                 }
15379
15380                 udelay(100);
15381         }
15382
15383         return ret;
15384 }
15385
15386 #define TEST_BUFFER_SIZE        0x2000
15387
15388 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15389         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15390         { },
15391 };
15392
15393 static int __devinit tg3_test_dma(struct tg3 *tp)
15394 {
15395         dma_addr_t buf_dma;
15396         u32 *buf, saved_dma_rwctrl;
15397         int ret = 0;
15398
15399         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15400                                  &buf_dma, GFP_KERNEL);
15401         if (!buf) {
15402                 ret = -ENOMEM;
15403                 goto out_nofree;
15404         }
15405
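              /* Seed DMA_RWCTRL with the PCI bus command codes used for
               * DMA transfers: 0x7 (Memory Write) and 0x6 (Memory Read).
               * The boundary bits are folded in next.
               */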
15406         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15407                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15408
15409         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15410
15411         if (tg3_flag(tp, 57765_PLUS))
15412                 goto out;
15413
15414         if (tg3_flag(tp, PCI_EXPRESS)) {
15415                 /* DMA read watermark not used on PCIE */
15416                 tp->dma_rwctrl |= 0x00180000;
15417         } else if (!tg3_flag(tp, PCIX_MODE)) {
15418                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15419                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15420                         tp->dma_rwctrl |= 0x003f0000;
15421                 else
15422                         tp->dma_rwctrl |= 0x003f000f;
15423         } else {
15424                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15425                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15426                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15427                         u32 read_water = 0x7;
15428
15429                         /* If the 5704 is behind the EPB bridge, we can
15430                          * do the less restrictive ONE_DMA workaround for
15431                          * better performance.
15432                          */
15433                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15434                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15435                                 tp->dma_rwctrl |= 0x8000;
15436                         else if (ccval == 0x6 || ccval == 0x7)
15437                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15438
15439                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15440                                 read_water = 4;
15441                         /* Set bit 23 to enable PCIX hw bug fix */
15442                         tp->dma_rwctrl |=
15443                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15444                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15445                                 (1 << 23);
15446                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15447                         /* 5780 always in PCIX mode */
15448                         tp->dma_rwctrl |= 0x00144000;
15449                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15450                         /* 5714 always in PCIX mode */
15451                         tp->dma_rwctrl |= 0x00148000;
15452                 } else {
15453                         tp->dma_rwctrl |= 0x001b000f;
15454                 }
15455         }
15456
15457         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15458             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15459                 tp->dma_rwctrl &= 0xfffffff0;
15460
15461         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15462             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15463                 /* Remove this if it causes problems for some boards. */
15464                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15465
15466                 /* On 5700/5701 chips, we need to set this bit.
15467                  * Otherwise the chip will issue cacheline transactions
15468          * to streamable DMA memory without all of the byte
15469                  * enables turned on.  This is an error on several
15470                  * RISC PCI controllers, in particular sparc64.
15471                  *
15472                  * On 5703/5704 chips, this bit has been reassigned
15473                  * a different meaning.  In particular, it is used
15474                  * on those chips to enable a PCI-X workaround.
15475                  */
15476                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15477         }
15478
15479         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15480
15481 #if 0
15482         /* Unneeded, already done by tg3_get_invariants.  */
15483         tg3_switch_clocks(tp);
15484 #endif
15485
15486         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15487             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15488                 goto out;
15489
15490         /* It is best to perform the DMA test with maximum write burst size
15491          * to expose the 5700/5701 write DMA bug.
15492          */
15493         saved_dma_rwctrl = tp->dma_rwctrl;
15494         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15495         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15496
15497         while (1) {
15498                 u32 *p = buf, i;
15499
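                      /* Fill the buffer with an incrementing word pattern
                       * so any corruption is easy to spot on readback.
                       */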
15500                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15501                         p[i] = i;
15502
15503                 /* Send the buffer to the chip. */
15504                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15505                 if (ret) {
15506                         dev_err(&tp->pdev->dev,
15507                                 "%s: Buffer write failed. err = %d\n",
15508                                 __func__, ret);
15509                         break;
15510                 }
15511
15512 #if 0
15513                 /* validate data reached card RAM correctly. */
15514                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15515                         u32 val;
15516                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15517                         if (le32_to_cpu(val) != p[i]) {
15518                                 dev_err(&tp->pdev->dev,
15519                                         "%s: Buffer corrupted on device! "
15520                                         "(%d != %d)\n", __func__, val, i);
15521                                 /* ret = -ENODEV here? */
15522                         }
15523                         p[i] = 0;
15524                 }
15525 #endif
15526                 /* Now read it back. */
15527                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15528                 if (ret) {
15529                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15530                                 "err = %d\n", __func__, ret);
15531                         break;
15532                 }
15533
15534                 /* Verify it. */
15535                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15536                         if (p[i] == i)
15537                                 continue;
15538
15539                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15540                             DMA_RWCTRL_WRITE_BNDRY_16) {
15541                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15542                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15543                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15544                                 break;
15545                         } else {
15546                                 dev_err(&tp->pdev->dev,
15547                                         "%s: Buffer corrupted on read back! "
15548                                         "(%d != %d)\n", __func__, p[i], i);
15549                                 ret = -ENODEV;
15550                                 goto out;
15551                         }
15552                 }
15553
15554                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15555                         /* Success. */
15556                         ret = 0;
15557                         break;
15558                 }
15559         }
15560         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15561             DMA_RWCTRL_WRITE_BNDRY_16) {
15562                 /* DMA test passed without adjusting the DMA boundary;
15563                  * now look for chipsets that are known to expose the
15564                  * DMA bug without failing the test.
15565                  */
15566                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15567                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15568                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15569                 } else {
15570                         /* Safe to use the calculated DMA boundary. */
15571                         tp->dma_rwctrl = saved_dma_rwctrl;
15572                 }
15573
15574                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15575         }
15576
15577 out:
15578         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15579 out_nofree:
15580         return ret;
15581 }
15582
15583 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15584 {
15585         if (tg3_flag(tp, 57765_PLUS)) {
15586                 tp->bufmgr_config.mbuf_read_dma_low_water =
15587                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15588                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15589                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15590                 tp->bufmgr_config.mbuf_high_water =
15591                         DEFAULT_MB_HIGH_WATER_57765;
15592
15593                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15594                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15595                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15596                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15597                 tp->bufmgr_config.mbuf_high_water_jumbo =
15598                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15599         } else if (tg3_flag(tp, 5705_PLUS)) {
15600                 tp->bufmgr_config.mbuf_read_dma_low_water =
15601                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15602                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15603                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15604                 tp->bufmgr_config.mbuf_high_water =
15605                         DEFAULT_MB_HIGH_WATER_5705;
15606                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15607                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15608                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15609                         tp->bufmgr_config.mbuf_high_water =
15610                                 DEFAULT_MB_HIGH_WATER_5906;
15611                 }
15612
15613                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15614                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15615                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15616                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15617                 tp->bufmgr_config.mbuf_high_water_jumbo =
15618                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15619         } else {
15620                 tp->bufmgr_config.mbuf_read_dma_low_water =
15621                         DEFAULT_MB_RDMA_LOW_WATER;
15622                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15623                         DEFAULT_MB_MACRX_LOW_WATER;
15624                 tp->bufmgr_config.mbuf_high_water =
15625                         DEFAULT_MB_HIGH_WATER;
15626
15627                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15628                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15629                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15630                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15631                 tp->bufmgr_config.mbuf_high_water_jumbo =
15632                         DEFAULT_MB_HIGH_WATER_JUMBO;
15633         }
15634
15635         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15636         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15637 }
15638
15639 static char * __devinit tg3_phy_string(struct tg3 *tp)
15640 {
15641         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15642         case TG3_PHY_ID_BCM5400:        return "5400";
15643         case TG3_PHY_ID_BCM5401:        return "5401";
15644         case TG3_PHY_ID_BCM5411:        return "5411";
15645         case TG3_PHY_ID_BCM5701:        return "5701";
15646         case TG3_PHY_ID_BCM5703:        return "5703";
15647         case TG3_PHY_ID_BCM5704:        return "5704";
15648         case TG3_PHY_ID_BCM5705:        return "5705";
15649         case TG3_PHY_ID_BCM5750:        return "5750";
15650         case TG3_PHY_ID_BCM5752:        return "5752";
15651         case TG3_PHY_ID_BCM5714:        return "5714";
15652         case TG3_PHY_ID_BCM5780:        return "5780";
15653         case TG3_PHY_ID_BCM5755:        return "5755";
15654         case TG3_PHY_ID_BCM5787:        return "5787";
15655         case TG3_PHY_ID_BCM5784:        return "5784";
15656         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15657         case TG3_PHY_ID_BCM5906:        return "5906";
15658         case TG3_PHY_ID_BCM5761:        return "5761";
15659         case TG3_PHY_ID_BCM5718C:       return "5718C";
15660         case TG3_PHY_ID_BCM5718S:       return "5718S";
15661         case TG3_PHY_ID_BCM57765:       return "57765";
15662         case TG3_PHY_ID_BCM5719C:       return "5719C";
15663         case TG3_PHY_ID_BCM5720C:       return "5720C";
15664         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15665         case 0:                 return "serdes";
15666         default:                return "unknown";
15667         }
15668 }
15669
15670 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15671 {
15672         if (tg3_flag(tp, PCI_EXPRESS)) {
15673                 strcpy(str, "PCI Express");
15674                 return str;
15675         } else if (tg3_flag(tp, PCIX_MODE)) {
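                      /* The low five bits of CLOCK_CTRL appear to encode
                       * the PCI-X bus speed strapped at reset
                       * (0 = 33MHz ... 7 = 133MHz).
                       */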
15676                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15677
15678                 strcpy(str, "PCIX:");
15679
15680                 if ((clock_ctrl == 7) ||
15681                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15682                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15683                         strcat(str, "133MHz");
15684                 else if (clock_ctrl == 0)
15685                         strcat(str, "33MHz");
15686                 else if (clock_ctrl == 2)
15687                         strcat(str, "50MHz");
15688                 else if (clock_ctrl == 4)
15689                         strcat(str, "66MHz");
15690                 else if (clock_ctrl == 6)
15691                         strcat(str, "100MHz");
15692         } else {
15693                 strcpy(str, "PCI:");
15694                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15695                         strcat(str, "66MHz");
15696                 else
15697                         strcat(str, "33MHz");
15698         }
15699         if (tg3_flag(tp, PCI_32BIT))
15700                 strcat(str, ":32-bit");
15701         else
15702                 strcat(str, ":64-bit");
15703         return str;
15704 }
15705
15706 static void __devinit tg3_init_coal(struct tg3 *tp)
15707 {
15708         struct ethtool_coalesce *ec = &tp->coal;
15709
15710         memset(ec, 0, sizeof(*ec));
15711         ec->cmd = ETHTOOL_GCOALESCE;
15712         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15713         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15714         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15715         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15716         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15717         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15718         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15719         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15720         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15721
15722         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15723                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15724                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15725                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15726                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15727                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15728         }
15729
15730         if (tg3_flag(tp, 5705_PLUS)) {
15731                 ec->rx_coalesce_usecs_irq = 0;
15732                 ec->tx_coalesce_usecs_irq = 0;
15733                 ec->stats_block_coalesce_usecs = 0;
15734         }
15735 }
15736
15737 static int __devinit tg3_init_one(struct pci_dev *pdev,
15738                                   const struct pci_device_id *ent)
15739 {
15740         struct net_device *dev;
15741         struct tg3 *tp;
15742         int i, err, pm_cap;
15743         u32 sndmbx, rcvmbx, intmbx;
15744         char str[40];
15745         u64 dma_mask, persist_dma_mask;
15746         netdev_features_t features = 0;
15747
15748         printk_once(KERN_INFO "%s\n", version);
15749
15750         err = pci_enable_device(pdev);
15751         if (err) {
15752                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15753                 return err;
15754         }
15755
15756         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15757         if (err) {
15758                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15759                 goto err_out_disable_pdev;
15760         }
15761
15762         pci_set_master(pdev);
15763
15764         /* Find power-management capability. */
15765         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15766         if (pm_cap == 0) {
15767                 dev_err(&pdev->dev,
15768                         "Cannot find Power Management capability, aborting\n");
15769                 err = -EIO;
15770                 goto err_out_free_res;
15771         }
15772
15773         err = pci_set_power_state(pdev, PCI_D0);
15774         if (err) {
15775                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15776                 goto err_out_free_res;
15777         }
15778
15779         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15780         if (!dev) {
15781                 err = -ENOMEM;
15782                 goto err_out_power_down;
15783         }
15784
15785         SET_NETDEV_DEV(dev, &pdev->dev);
15786
15787         tp = netdev_priv(dev);
15788         tp->pdev = pdev;
15789         tp->dev = dev;
15790         tp->pm_cap = pm_cap;
15791         tp->rx_mode = TG3_DEF_RX_MODE;
15792         tp->tx_mode = TG3_DEF_TX_MODE;
15793
15794         if (tg3_debug > 0)
15795                 tp->msg_enable = tg3_debug;
15796         else
15797                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15798
15799         /* The word/byte swap controls here govern register access byte
15800          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15801          * setting below.
15802          */
15803         tp->misc_host_ctrl =
15804                 MISC_HOST_CTRL_MASK_PCI_INT |
15805                 MISC_HOST_CTRL_WORD_SWAP |
15806                 MISC_HOST_CTRL_INDIR_ACCESS |
15807                 MISC_HOST_CTRL_PCISTATE_RW;
15808
15809         /* The NONFRM (non-frame) byte/word swap controls take effect
15810          * on descriptor entries, i.e. anything that isn't packet data.
15811          *
15812          * The StrongARM chips on the board (one for tx, one for rx)
15813          * are running in big-endian mode.
15814          */
15815         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15816                         GRC_MODE_WSWAP_NONFRM_DATA);
15817 #ifdef __BIG_ENDIAN
15818         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15819 #endif
15820         spin_lock_init(&tp->lock);
15821         spin_lock_init(&tp->indirect_lock);
15822         INIT_WORK(&tp->reset_task, tg3_reset_task);
15823
15824         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15825         if (!tp->regs) {
15826                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15827                 err = -ENOMEM;
15828                 goto err_out_free_dev;
15829         }
15830
15831         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15832             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15833             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15834             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15835             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15836             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15837             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15838             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15839                 tg3_flag_set(tp, ENABLE_APE);
15840                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15841                 if (!tp->aperegs) {
15842                         dev_err(&pdev->dev,
15843                                 "Cannot map APE registers, aborting\n");
15844                         err = -ENOMEM;
15845                         goto err_out_iounmap;
15846                 }
15847         }
15848
15849         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15850         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15851
15852         dev->ethtool_ops = &tg3_ethtool_ops;
15853         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15854         dev->netdev_ops = &tg3_netdev_ops;
15855         dev->irq = pdev->irq;
15856
15857         err = tg3_get_invariants(tp);
15858         if (err) {
15859                 dev_err(&pdev->dev,
15860                         "Problem fetching invariants of chip, aborting\n");
15861                 goto err_out_apeunmap;
15862         }
15863
15864         /* The EPB bridge inside 5714, 5715, and 5780 and any
15865          * device behind the EPB cannot support DMA addresses > 40-bit.
15866          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15867          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15868          * do DMA address check in tg3_start_xmit().
15869          */
15870         if (tg3_flag(tp, IS_5788))
15871                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15872         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15873                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15874 #ifdef CONFIG_HIGHMEM
15875                 dma_mask = DMA_BIT_MASK(64);
15876 #endif
15877         } else
15878                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15879
15880         /* Configure DMA attributes. */
15881         if (dma_mask > DMA_BIT_MASK(32)) {
15882                 err = pci_set_dma_mask(pdev, dma_mask);
15883                 if (!err) {
15884                         features |= NETIF_F_HIGHDMA;
15885                         err = pci_set_consistent_dma_mask(pdev,
15886                                                           persist_dma_mask);
15887                         if (err < 0) {
15888                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15889                                         "DMA for consistent allocations\n");
15890                                 goto err_out_apeunmap;
15891                         }
15892                 }
15893         }
15894         if (err || dma_mask == DMA_BIT_MASK(32)) {
15895                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15896                 if (err) {
15897                         dev_err(&pdev->dev,
15898                                 "No usable DMA configuration, aborting\n");
15899                         goto err_out_apeunmap;
15900                 }
15901         }
15902
15903         tg3_init_bufmgr_config(tp);
15904
15905         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15906
15907         /* 5700 B0 chips do not support checksumming correctly due
15908          * to hardware bugs.
15909          */
15910         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15911                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15912
15913                 if (tg3_flag(tp, 5755_PLUS))
15914                         features |= NETIF_F_IPV6_CSUM;
15915         }
15916
15917         /* TSO is on by default on chips that support hardware TSO.
15918          * Firmware TSO on older chips gives lower performance, so it
15919          * is off by default, but can be enabled using ethtool.
15920          */
15921         if ((tg3_flag(tp, HW_TSO_1) ||
15922              tg3_flag(tp, HW_TSO_2) ||
15923              tg3_flag(tp, HW_TSO_3)) &&
15924             (features & NETIF_F_IP_CSUM))
15925                 features |= NETIF_F_TSO;
15926         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15927                 if (features & NETIF_F_IPV6_CSUM)
15928                         features |= NETIF_F_TSO6;
15929                 if (tg3_flag(tp, HW_TSO_3) ||
15930                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15931                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15932                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15933                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15934                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15935                         features |= NETIF_F_TSO_ECN;
15936         }
15937
15938         dev->features |= features;
15939         dev->vlan_features |= features;
15940
15941         /*
15942          * Add loopback capability only for a subset of devices that support
15943          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15944          * loopback for the remaining devices.
15945          */
15946         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15947             !tg3_flag(tp, CPMU_PRESENT))
15948                 /* Add the loopback capability */
15949                 features |= NETIF_F_LOOPBACK;
15950
15951         dev->hw_features |= features;
15952
15953         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15954             !tg3_flag(tp, TSO_CAPABLE) &&
15955             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15956                 tg3_flag_set(tp, MAX_RXPEND_64);
15957                 tp->rx_pending = 63;
15958         }
15959
15960         err = tg3_get_device_address(tp);
15961         if (err) {
15962                 dev_err(&pdev->dev,
15963                         "Could not obtain valid ethernet address, aborting\n");
15964                 goto err_out_apeunmap;
15965         }
15966
15967         /*
15968          * Reset chip in case UNDI or EFI driver did not shut down.
15969          * The DMA self test will enable WDMAC and we'll see (spurious)
15970          * pending DMA on the PCI bus at that point.
15971          */
15972         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15973             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15974                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15975                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15976         }
15977
15978         err = tg3_test_dma(tp);
15979         if (err) {
15980                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15981                 goto err_out_apeunmap;
15982         }
15983
15984         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15985         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15986         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
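              /* Walk the per-vector mailbox registers: interrupt mailboxes
               * 0-4 are spaced 8 bytes apart, later ones 4 bytes apart;
               * the rcv/snd mailboxes are stepped inside the loop below.
               */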
15987         for (i = 0; i < tp->irq_max; i++) {
15988                 struct tg3_napi *tnapi = &tp->napi[i];
15989
15990                 tnapi->tp = tp;
15991                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15992
15993                 tnapi->int_mbox = intmbx;
15994                 if (i <= 4)
15995                         intmbx += 0x8;
15996                 else
15997                         intmbx += 0x4;
15998
15999                 tnapi->consmbox = rcvmbx;
16000                 tnapi->prodmbox = sndmbx;
16001
16002                 if (i)
16003                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16004                 else
16005                         tnapi->coal_now = HOSTCC_MODE_NOW;
16006
16007                 if (!tg3_flag(tp, SUPPORT_MSIX))
16008                         break;
16009
16010                 /*
16011                  * If we support MSIX, we'll be using RSS.  If we're using
16012                  * RSS, the first vector only handles link interrupts and the
16013                  * remaining vectors handle rx and tx interrupts.  Reuse the
16014          * mailbox values for the next iteration.  The values we set up
16015          * above are still useful for single-vectored mode.
16016                  */
16017                 if (!i)
16018                         continue;
16019
16020                 rcvmbx += 0x8;
16021
16022                 if (sndmbx & 0x4)
16023                         sndmbx -= 0x4;
16024                 else
16025                         sndmbx += 0xc;
16026         }
16027
16028         tg3_init_coal(tp);
16029
16030         pci_set_drvdata(pdev, dev);
16031
16032         if (tg3_flag(tp, 5717_PLUS)) {
16033                 /* Resume a low-power mode */
16034                 tg3_frob_aux_power(tp, false);
16035         }
16036
16037         tg3_timer_init(tp);
16038
16039         err = register_netdev(dev);
16040         if (err) {
16041                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16042                 goto err_out_apeunmap;
16043         }
16044
16045         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16046                     tp->board_part_number,
16047                     tp->pci_chip_rev_id,
16048                     tg3_bus_string(tp, str),
16049                     dev->dev_addr);
16050
16051         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16052                 struct phy_device *phydev;
16053                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16054                 netdev_info(dev,
16055                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16056                             phydev->drv->name, dev_name(&phydev->dev));
16057         } else {
16058                 char *ethtype;
16059
16060                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16061                         ethtype = "10/100Base-TX";
16062                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16063                         ethtype = "1000Base-SX";
16064                 else
16065                         ethtype = "10/100/1000Base-T";
16066
16067                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16068                             "(WireSpeed[%d], EEE[%d])\n",
16069                             tg3_phy_string(tp), ethtype,
16070                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16071                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16072         }
16073
16074         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16075                     (dev->features & NETIF_F_RXCSUM) != 0,
16076                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
16077                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16078                     tg3_flag(tp, ENABLE_ASF) != 0,
16079                     tg3_flag(tp, TSO_CAPABLE) != 0);
16080         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16081                     tp->dma_rwctrl,
16082                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16083                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16084
16085         pci_save_state(pdev);
16086
16087         return 0;
16088
16089 err_out_apeunmap:
16090         if (tp->aperegs) {
16091                 iounmap(tp->aperegs);
16092                 tp->aperegs = NULL;
16093         }
16094
16095 err_out_iounmap:
16096         if (tp->regs) {
16097                 iounmap(tp->regs);
16098                 tp->regs = NULL;
16099         }
16100
16101 err_out_free_dev:
16102         free_netdev(dev);
16103
16104 err_out_power_down:
16105         pci_set_power_state(pdev, PCI_D3hot);
16106
16107 err_out_free_res:
16108         pci_release_regions(pdev);
16109
16110 err_out_disable_pdev:
16111         pci_disable_device(pdev);
16112         pci_set_drvdata(pdev, NULL);
16113         return err;
16114 }
16115
16116 static void __devexit tg3_remove_one(struct pci_dev *pdev)
16117 {
16118         struct net_device *dev = pci_get_drvdata(pdev);
16119
16120         if (dev) {
16121                 struct tg3 *tp = netdev_priv(dev);
16122
16123                 release_firmware(tp->fw);
16124
16125                 tg3_reset_task_cancel(tp);
16126
16127                 if (tg3_flag(tp, USE_PHYLIB)) {
16128                         tg3_phy_fini(tp);
16129                         tg3_mdio_fini(tp);
16130                 }
16131
16132                 unregister_netdev(dev);
16133                 if (tp->aperegs) {
16134                         iounmap(tp->aperegs);
16135                         tp->aperegs = NULL;
16136                 }
16137                 if (tp->regs) {
16138                         iounmap(tp->regs);
16139                         tp->regs = NULL;
16140                 }
16141                 free_netdev(dev);
16142                 pci_release_regions(pdev);
16143                 pci_disable_device(pdev);
16144                 pci_set_drvdata(pdev, NULL);
16145         }
16146 }
16147
16148 #ifdef CONFIG_PM_SLEEP
16149 static int tg3_suspend(struct device *device)
16150 {
16151         struct pci_dev *pdev = to_pci_dev(device);
16152         struct net_device *dev = pci_get_drvdata(pdev);
16153         struct tg3 *tp = netdev_priv(dev);
16154         int err;
16155
16156         if (!netif_running(dev))
16157                 return 0;
16158
16159         tg3_reset_task_cancel(tp);
16160         tg3_phy_stop(tp);
16161         tg3_netif_stop(tp);
16162
16163         tg3_timer_stop(tp);
16164
16165         tg3_full_lock(tp, 1);
16166         tg3_disable_ints(tp);
16167         tg3_full_unlock(tp);
16168
16169         netif_device_detach(dev);
16170
16171         tg3_full_lock(tp, 0);
16172         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16173         tg3_flag_clear(tp, INIT_COMPLETE);
16174         tg3_full_unlock(tp);
16175
16176         err = tg3_power_down_prepare(tp);
16177         if (err) {
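                      /* Power-down preparation failed; bring the hardware
                       * back up and reattach the interface so the device
                       * remains usable.
                       */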
16178                 int err2;
16179
16180                 tg3_full_lock(tp, 0);
16181
16182                 tg3_flag_set(tp, INIT_COMPLETE);
16183                 err2 = tg3_restart_hw(tp, 1);
16184                 if (err2)
16185                         goto out;
16186
16187                 tg3_timer_start(tp);
16188
16189                 netif_device_attach(dev);
16190                 tg3_netif_start(tp);
16191
16192 out:
16193                 tg3_full_unlock(tp);
16194
16195                 if (!err2)
16196                         tg3_phy_start(tp);
16197         }
16198
16199         return err;
16200 }
16201
16202 static int tg3_resume(struct device *device)
16203 {
16204         struct pci_dev *pdev = to_pci_dev(device);
16205         struct net_device *dev = pci_get_drvdata(pdev);
16206         struct tg3 *tp = netdev_priv(dev);
16207         int err;
16208
16209         if (!netif_running(dev))
16210                 return 0;
16211
16212         netif_device_attach(dev);
16213
16214         tg3_full_lock(tp, 0);
16215
16216         tg3_flag_set(tp, INIT_COMPLETE);
16217         err = tg3_restart_hw(tp, 1);
16218         if (err)
16219                 goto out;
16220
16221         tg3_timer_start(tp);
16222
16223         tg3_netif_start(tp);
16224
16225 out:
16226         tg3_full_unlock(tp);
16227
16228         if (!err)
16229                 tg3_phy_start(tp);
16230
16231         return err;
16232 }
16233
16234 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16235 #define TG3_PM_OPS (&tg3_pm_ops)
16236
16237 #else
16238
16239 #define TG3_PM_OPS NULL
16240
16241 #endif /* CONFIG_PM_SLEEP */
16242
16243 /**
16244  * tg3_io_error_detected - called when PCI error is detected
16245  * @pdev: Pointer to PCI device
16246  * @state: The current pci connection state
16247  *
16248  * This function is called after a PCI bus error affecting
16249  * this device has been detected.
16250  */
16251 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16252                                               pci_channel_state_t state)
16253 {
16254         struct net_device *netdev = pci_get_drvdata(pdev);
16255         struct tg3 *tp = netdev_priv(netdev);
16256         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16257
16258         netdev_info(netdev, "PCI I/O error detected\n");
16259
16260         rtnl_lock();
16261
16262         if (!netif_running(netdev))
16263                 goto done;
16264
16265         tg3_phy_stop(tp);
16266
16267         tg3_netif_stop(tp);
16268
16269         tg3_timer_stop(tp);
16270
16271         /* Want to make sure that the reset task doesn't run */
16272         tg3_reset_task_cancel(tp);
16273
16274         netif_device_detach(netdev);
16275
16276         /* Clean up software state, even if MMIO is blocked */
16277         tg3_full_lock(tp, 0);
16278         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16279         tg3_full_unlock(tp);
16280
16281 done:
16282         if (state == pci_channel_io_perm_failure)
16283                 err = PCI_ERS_RESULT_DISCONNECT;
16284         else
16285                 pci_disable_device(pdev);
16286
16287         rtnl_unlock();
16288
16289         return err;
16290 }
16291
16292 /**
16293  * tg3_io_slot_reset - called after the pci bus has been reset.
16294  * @pdev: Pointer to PCI device
16295  *
16296  * Restart the card from scratch, as if from a cold boot.
16297  * At this point, the card has experienced a hard reset,
16298  * followed by fixups by BIOS, and has its config space
16299  * set up identically to what it was at cold boot.
16300  */
16301 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16302 {
16303         struct net_device *netdev = pci_get_drvdata(pdev);
16304         struct tg3 *tp = netdev_priv(netdev);
16305         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16306         int err;
16307
16308         rtnl_lock();
16309
16310         if (pci_enable_device(pdev)) {
16311                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16312                 goto done;
16313         }
16314
16315         pci_set_master(pdev);
16316         pci_restore_state(pdev);
16317         pci_save_state(pdev);
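              /* Re-save the just-restored config space so a later restore
               * (e.g. after another reset) starts from this known-good
               * snapshot.
               */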
16318
16319         if (!netif_running(netdev)) {
16320                 rc = PCI_ERS_RESULT_RECOVERED;
16321                 goto done;
16322         }
16323
16324         err = tg3_power_up(tp);
16325         if (err)
16326                 goto done;
16327
16328         rc = PCI_ERS_RESULT_RECOVERED;
16329
16330 done:
16331         rtnl_unlock();
16332
16333         return rc;
16334 }
16335
16336 /**
16337  * tg3_io_resume - called when traffic can start flowing again.
16338  * @pdev: Pointer to PCI device
16339  *
16340  * This callback is called when the error recovery driver tells
16341  * us that it's OK to resume normal operation.
16342  */
16343 static void tg3_io_resume(struct pci_dev *pdev)
16344 {
16345         struct net_device *netdev = pci_get_drvdata(pdev);
16346         struct tg3 *tp = netdev_priv(netdev);
16347         int err;
16348
16349         rtnl_lock();
16350
16351         if (!netif_running(netdev))
16352                 goto done;
16353
16354         tg3_full_lock(tp, 0);
16355         tg3_flag_set(tp, INIT_COMPLETE);
16356         err = tg3_restart_hw(tp, 1);
16357         tg3_full_unlock(tp);
16358         if (err) {
16359                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16360                 goto done;
16361         }
16362
16363         netif_device_attach(netdev);
16364
16365         tg3_timer_start(tp);
16366
16367         tg3_netif_start(tp);
16368
16369         tg3_phy_start(tp);
16370
16371 done:
16372         rtnl_unlock();
16373 }
16374
16375 static struct pci_error_handlers tg3_err_handler = {
16376         .error_detected = tg3_io_error_detected,
16377         .slot_reset     = tg3_io_slot_reset,
16378         .resume         = tg3_io_resume
16379 };
16380
16381 static struct pci_driver tg3_driver = {
16382         .name           = DRV_MODULE_NAME,
16383         .id_table       = tg3_pci_tbl,
16384         .probe          = tg3_init_one,
16385         .remove         = __devexit_p(tg3_remove_one),
16386         .err_handler    = &tg3_err_handler,
16387         .driver.pm      = TG3_PM_OPS,
16388 };
16389
16390 static int __init tg3_init(void)
16391 {
16392         return pci_register_driver(&tg3_driver);
16393 }
16394
16395 static void __exit tg3_cleanup(void)
16396 {
16397         pci_unregister_driver(&tg3_driver);
16398 }
16399
16400 module_init(tg3_init);
16401 module_exit(tg3_cleanup);