1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2012 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #if IS_ENABLED(CONFIG_HWMON)
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50 #endif
51
52 #include <net/checksum.h>
53 #include <net/ip.h>
54
55 #include <linux/io.h>
56 #include <asm/byteorder.h>
57 #include <linux/uaccess.h>
58
59 #ifdef CONFIG_SPARC
60 #include <asm/idprom.h>
61 #include <asm/prom.h>
62 #endif
63
64 #define BAR_0   0
65 #define BAR_2   2
66
67 #include "tg3.h"
68
69 /* Functions & macros to verify TG3_FLAGS types */
70
71 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
72 {
73         return test_bit(flag, bits);
74 }
75
76 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
77 {
78         set_bit(flag, bits);
79 }
80
81 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
82 {
83         clear_bit(flag, bits);
84 }
85
86 #define tg3_flag(tp, flag)                              \
87         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
88 #define tg3_flag_set(tp, flag)                          \
89         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_clear(tp, flag)                        \
91         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
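/* Example (from the macros above): tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the driver's flag bitmap, with the enum parameter giving
 * compile-time type checking of the flag name.
 */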
92
93 #define DRV_MODULE_NAME         "tg3"
94 #define TG3_MAJ_NUM                     3
95 #define TG3_MIN_NUM                     124
96 #define DRV_MODULE_VERSION      \
97         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
98 #define DRV_MODULE_RELDATE      "March 21, 2012"
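/* With the values above, DRV_MODULE_VERSION expands to the string "3.124". */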
99
100 #define RESET_KIND_SHUTDOWN     0
101 #define RESET_KIND_INIT         1
102 #define RESET_KIND_SUSPEND      2
103
104 #define TG3_DEF_RX_MODE         0
105 #define TG3_DEF_TX_MODE         0
106 #define TG3_DEF_MSG_ENABLE        \
107         (NETIF_MSG_DRV          | \
108          NETIF_MSG_PROBE        | \
109          NETIF_MSG_LINK         | \
110          NETIF_MSG_TIMER        | \
111          NETIF_MSG_IFDOWN       | \
112          NETIF_MSG_IFUP         | \
113          NETIF_MSG_RX_ERR       | \
114          NETIF_MSG_TX_ERR)
115
116 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
117
118 /* length of time before we decide the hardware is borked,
119  * and dev->tx_timeout() should be called to fix the problem
120  */
121
122 #define TG3_TX_TIMEOUT                  (5 * HZ)
123
124 /* hardware minimum and maximum for a single frame's data payload */
125 #define TG3_MIN_MTU                     60
126 #define TG3_MAX_MTU(tp) \
127         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
128
129 /* These numbers seem to be hard coded in the NIC firmware somehow.
130  * You can't change the ring sizes, but you can change where you place
131  * them in the NIC onboard memory.
132  */
133 #define TG3_RX_STD_RING_SIZE(tp) \
134         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
135          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
136 #define TG3_DEF_RX_RING_PENDING         200
137 #define TG3_RX_JMB_RING_SIZE(tp) \
138         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
139          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
140 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
141
142 /* Do not place this ring-size value into the tp struct itself;
143  * we really want to expose these constants to GCC so that modulo et
144  * al. operations are done with shifts and masks instead of with
145  * hw multiply/modulo instructions.  Another solution would be to
146  * replace things like '% foo' with '& (foo - 1)'.
147  */
148
149 #define TG3_TX_RING_SIZE                512
150 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
151
152 #define TG3_RX_STD_RING_BYTES(tp) \
153         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
154 #define TG3_RX_JMB_RING_BYTES(tp) \
155         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
156 #define TG3_RX_RCB_RING_BYTES(tp) \
157         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
158 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
159                                  TG3_TX_RING_SIZE)
160 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
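/* Example: with TG3_TX_RING_SIZE == 512, NEXT_TX(511) == 512 & 511 == 0,
 * so the mask form wraps the ring index without a modulo instruction.
 */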
161
162 #define TG3_DMA_BYTE_ENAB               64
163
164 #define TG3_RX_STD_DMA_SZ               1536
165 #define TG3_RX_JMB_DMA_SZ               9046
166
167 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
168
169 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
170 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
171
172 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
173         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
174
175 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
176         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
177
178 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
179  * that are at least dword aligned when used in PCIX mode.  The driver
180  * works around this bug by double copying the packet.  This workaround
181  * is built into the normal double copy length check for efficiency.
182  *
183  * However, the double copy is only necessary on those architectures
184  * where unaligned memory accesses are inefficient.  For those architectures
185  * where unaligned memory accesses incur little penalty, we can reintegrate
186  * the 5701 in the normal rx path.  Doing so saves a device structure
187  * dereference by hardcoding the double copy threshold in place.
188  */
189 #define TG3_RX_COPY_THRESHOLD           256
190 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
191         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
192 #else
193         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
194 #endif
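/* Example: on an architecture with efficient unaligned access,
 * TG3_RX_COPY_THRESH is the compile-time constant 256, so the copy-vs-map
 * comparison folds to an immediate; otherwise the threshold is fetched
 * from tp->rx_copy_thresh at runtime.
 */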
195
196 #if (NET_IP_ALIGN != 0)
197 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
198 #else
199 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
200 #endif
201
202 /* minimum number of free TX descriptors required to wake up TX process */
203 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
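/* Assuming the default tx_pending of TG3_DEF_TX_RING_PENDING (511), the
 * TX queue is woken once at least 511 / 4 = 127 descriptors are free.
 */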
204 #define TG3_TX_BD_DMA_MAX_2K            2048
205 #define TG3_TX_BD_DMA_MAX_4K            4096
206
207 #define TG3_RAW_IP_ALIGN 2
208
209 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
210 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
211
212 #define FIRMWARE_TG3            "tigon/tg3.bin"
213 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
214 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
215
216 static char version[] __devinitdata =
217         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
218
219 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
220 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
221 MODULE_LICENSE("GPL");
222 MODULE_VERSION(DRV_MODULE_VERSION);
223 MODULE_FIRMWARE(FIRMWARE_TG3);
224 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
225 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
226
227 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
228 module_param(tg3_debug, int, 0);
229 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
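/* Illustrative example: raise verbosity at load time with a bitmask of
 * NETIF_MSG_* values, e.g.:
 *   modprobe tg3 tg3_debug=0x7fff
 */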
230
231 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
232         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
233         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
234         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
235         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
286         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
289         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
292         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
306         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
307         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
308         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
309         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
310         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
311         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
312         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
313         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
314         {}
315 };
316
317 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
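/* MODULE_DEVICE_TABLE() exports the ID list above as module aliases, so
 * udev/modprobe can autoload this driver when a matching PCI device appears.
 */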
318
319 static const struct {
320         const char string[ETH_GSTRING_LEN];
321 } ethtool_stats_keys[] = {
322         { "rx_octets" },
323         { "rx_fragments" },
324         { "rx_ucast_packets" },
325         { "rx_mcast_packets" },
326         { "rx_bcast_packets" },
327         { "rx_fcs_errors" },
328         { "rx_align_errors" },
329         { "rx_xon_pause_rcvd" },
330         { "rx_xoff_pause_rcvd" },
331         { "rx_mac_ctrl_rcvd" },
332         { "rx_xoff_entered" },
333         { "rx_frame_too_long_errors" },
334         { "rx_jabbers" },
335         { "rx_undersize_packets" },
336         { "rx_in_length_errors" },
337         { "rx_out_length_errors" },
338         { "rx_64_or_less_octet_packets" },
339         { "rx_65_to_127_octet_packets" },
340         { "rx_128_to_255_octet_packets" },
341         { "rx_256_to_511_octet_packets" },
342         { "rx_512_to_1023_octet_packets" },
343         { "rx_1024_to_1522_octet_packets" },
344         { "rx_1523_to_2047_octet_packets" },
345         { "rx_2048_to_4095_octet_packets" },
346         { "rx_4096_to_8191_octet_packets" },
347         { "rx_8192_to_9022_octet_packets" },
348
349         { "tx_octets" },
350         { "tx_collisions" },
351
352         { "tx_xon_sent" },
353         { "tx_xoff_sent" },
354         { "tx_flow_control" },
355         { "tx_mac_errors" },
356         { "tx_single_collisions" },
357         { "tx_mult_collisions" },
358         { "tx_deferred" },
359         { "tx_excessive_collisions" },
360         { "tx_late_collisions" },
361         { "tx_collide_2times" },
362         { "tx_collide_3times" },
363         { "tx_collide_4times" },
364         { "tx_collide_5times" },
365         { "tx_collide_6times" },
366         { "tx_collide_7times" },
367         { "tx_collide_8times" },
368         { "tx_collide_9times" },
369         { "tx_collide_10times" },
370         { "tx_collide_11times" },
371         { "tx_collide_12times" },
372         { "tx_collide_13times" },
373         { "tx_collide_14times" },
374         { "tx_collide_15times" },
375         { "tx_ucast_packets" },
376         { "tx_mcast_packets" },
377         { "tx_bcast_packets" },
378         { "tx_carrier_sense_errors" },
379         { "tx_discards" },
380         { "tx_errors" },
381
382         { "dma_writeq_full" },
383         { "dma_write_prioq_full" },
384         { "rxbds_empty" },
385         { "rx_discards" },
386         { "rx_errors" },
387         { "rx_threshold_hit" },
388
389         { "dma_readq_full" },
390         { "dma_read_prioq_full" },
391         { "tx_comp_queue_full" },
392
393         { "ring_set_send_prod_index" },
394         { "ring_status_update" },
395         { "nic_irqs" },
396         { "nic_avoided_irqs" },
397         { "nic_tx_threshold_hit" },
398
399         { "mbuf_lwm_thresh_hit" },
400 };
401
402 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
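/* The strings above are the counter names reported by "ethtool -S <iface>". */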
403
404
405 static const struct {
406         const char string[ETH_GSTRING_LEN];
407 } ethtool_test_keys[] = {
408         { "nvram test        (online) " },
409         { "link test         (online) " },
410         { "register test     (offline)" },
411         { "memory test       (offline)" },
412         { "mac loopback test (offline)" },
413         { "phy loopback test (offline)" },
414         { "ext loopback test (offline)" },
415         { "interrupt test    (offline)" },
416 };
417
418 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
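/* The strings above name the self-tests run via "ethtool -t <iface>". */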
419
420
421 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
422 {
423         writel(val, tp->regs + off);
424 }
425
426 static u32 tg3_read32(struct tg3 *tp, u32 off)
427 {
428         return readl(tp->regs + off);
429 }
430
431 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
432 {
433         writel(val, tp->aperegs + off);
434 }
435
436 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
437 {
438         return readl(tp->aperegs + off);
439 }
440
441 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
442 {
443         unsigned long flags;
444
445         spin_lock_irqsave(&tp->indirect_lock, flags);
446         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
447         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
448         spin_unlock_irqrestore(&tp->indirect_lock, flags);
449 }
450
451 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
452 {
453         writel(val, tp->regs + off);
454         readl(tp->regs + off);
455 }
456
457 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
458 {
459         unsigned long flags;
460         u32 val;
461
462         spin_lock_irqsave(&tp->indirect_lock, flags);
463         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
464         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
465         spin_unlock_irqrestore(&tp->indirect_lock, flags);
466         return val;
467 }
468
469 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
470 {
471         unsigned long flags;
472
473         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
474                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
475                                        TG3_64BIT_REG_LOW, val);
476                 return;
477         }
478         if (off == TG3_RX_STD_PROD_IDX_REG) {
479                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
480                                        TG3_64BIT_REG_LOW, val);
481                 return;
482         }
483
484         spin_lock_irqsave(&tp->indirect_lock, flags);
485         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
486         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
487         spin_unlock_irqrestore(&tp->indirect_lock, flags);
488
489         /* In indirect mode when disabling interrupts, we also need
490          * to clear the interrupt bit in the GRC local ctrl register.
491          */
492         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
493             (val == 0x1)) {
494                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
495                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
496         }
497 }
498
499 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
500 {
501         unsigned long flags;
502         u32 val;
503
504         spin_lock_irqsave(&tp->indirect_lock, flags);
505         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
506         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507         spin_unlock_irqrestore(&tp->indirect_lock, flags);
508         return val;
509 }
510
511 /* usec_wait specifies the wait time in usec when writing to certain registers
512  * where it is unsafe to read back the register without some delay.
513  * GRC_LOCAL_CTRL is one example, e.g. when the GPIOs are toggled to switch power.
514  * TG3PCI_CLOCK_CTRL is another, e.g. when the clock frequencies are changed.
515  */
516 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
517 {
518         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
519                 /* Non-posted method */
520                 tp->write32(tp, off, val);
521         else {
522                 /* Posted method */
523                 tg3_write32(tp, off, val);
524                 if (usec_wait)
525                         udelay(usec_wait);
526                 tp->read32(tp, off);
527         }
528         /* Wait again after the read for the posted method to guarantee that
529          * the wait time is met.
530          */
531         if (usec_wait)
532                 udelay(usec_wait);
533 }
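/* Example usage, via the tw32_wait_f() macro defined below: guarantee a
 * 40 usec settle time after a clock-control update, honored on both the
 * posted and non-posted paths:
 *   tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 */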
534
535 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
536 {
537         tp->write32_mbox(tp, off, val);
538         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
539                 tp->read32_mbox(tp, off);
540 }
541
542 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
543 {
544         void __iomem *mbox = tp->regs + off;
545         writel(val, mbox);
546         if (tg3_flag(tp, TXD_MBOX_HWBUG))
547                 writel(val, mbox);
548         if (tg3_flag(tp, MBOX_WRITE_REORDER))
549                 readl(mbox);
550 }
551
552 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
553 {
554         return readl(tp->regs + off + GRCMBOX_BASE);
555 }
556
557 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
558 {
559         writel(val, tp->regs + off + GRCMBOX_BASE);
560 }
561
562 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
563 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
564 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
565 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
566 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
567
568 #define tw32(reg, val)                  tp->write32(tp, reg, val)
569 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
570 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
571 #define tr32(reg)                       tp->read32(tp, reg)
572
573 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
574 {
575         unsigned long flags;
576
577         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
578             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
579                 return;
580
581         spin_lock_irqsave(&tp->indirect_lock, flags);
582         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
583                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
584                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
585
586                 /* Always leave this as zero. */
587                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
588         } else {
589                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
590                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
591
592                 /* Always leave this as zero. */
593                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
594         }
595         spin_unlock_irqrestore(&tp->indirect_lock, flags);
596 }
597
598 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
599 {
600         unsigned long flags;
601
602         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
603             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
604                 *val = 0;
605                 return;
606         }
607
608         spin_lock_irqsave(&tp->indirect_lock, flags);
609         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
610                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
611                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
612
613                 /* Always leave this as zero. */
614                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
615         } else {
616                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
617                 *val = tr32(TG3PCI_MEM_WIN_DATA);
618
619                 /* Always leave this as zero. */
620                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
621         }
622         spin_unlock_irqrestore(&tp->indirect_lock, flags);
623 }
624
625 static void tg3_ape_lock_init(struct tg3 *tp)
626 {
627         int i;
628         u32 regbase, bit;
629
630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
631                 regbase = TG3_APE_LOCK_GRANT;
632         else
633                 regbase = TG3_APE_PER_LOCK_GRANT;
634
635         /* Make sure the driver doesn't hold any stale locks. */
636         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
637                 switch (i) {
638                 case TG3_APE_LOCK_PHY0:
639                 case TG3_APE_LOCK_PHY1:
640                 case TG3_APE_LOCK_PHY2:
641                 case TG3_APE_LOCK_PHY3:
642                         bit = APE_LOCK_GRANT_DRIVER;
643                         break;
644                 default:
645                         if (!tp->pci_fn)
646                                 bit = APE_LOCK_GRANT_DRIVER;
647                         else
648                                 bit = 1 << tp->pci_fn;
649                 }
650                 tg3_ape_write32(tp, regbase + 4 * i, bit);
651         }
652
653 }
654
655 static int tg3_ape_lock(struct tg3 *tp, int locknum)
656 {
657         int i, off;
658         int ret = 0;
659         u32 status, req, gnt, bit;
660
661         if (!tg3_flag(tp, ENABLE_APE))
662                 return 0;
663
664         switch (locknum) {
665         case TG3_APE_LOCK_GPIO:
666                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
667                         return 0;
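                /* else: fall through to the GRC/MEM handling below */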
668         case TG3_APE_LOCK_GRC:
669         case TG3_APE_LOCK_MEM:
670                 if (!tp->pci_fn)
671                         bit = APE_LOCK_REQ_DRIVER;
672                 else
673                         bit = 1 << tp->pci_fn;
674                 break;
675         case TG3_APE_LOCK_PHY0:
676         case TG3_APE_LOCK_PHY1:
677         case TG3_APE_LOCK_PHY2:
678         case TG3_APE_LOCK_PHY3:
679                 bit = APE_LOCK_REQ_DRIVER;
680                 break;
681         default:
682                 return -EINVAL;
683         }
684
685         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
686                 req = TG3_APE_LOCK_REQ;
687                 gnt = TG3_APE_LOCK_GRANT;
688         } else {
689                 req = TG3_APE_PER_LOCK_REQ;
690                 gnt = TG3_APE_PER_LOCK_GRANT;
691         }
692
693         off = 4 * locknum;
694
695         tg3_ape_write32(tp, req + off, bit);
696
697         /* Wait for up to 1 millisecond to acquire lock. */
698         for (i = 0; i < 100; i++) {
699                 status = tg3_ape_read32(tp, gnt + off);
700                 if (status == bit)
701                         break;
702                 udelay(10);
703         }
704
705         if (status != bit) {
706                 /* Revoke the lock request. */
707                 tg3_ape_write32(tp, gnt + off, bit);
708                 ret = -EBUSY;
709         }
710
711         return ret;
712 }
713
714 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
715 {
716         u32 gnt, bit;
717
718         if (!tg3_flag(tp, ENABLE_APE))
719                 return;
720
721         switch (locknum) {
722         case TG3_APE_LOCK_GPIO:
723                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
724                         return;
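                /* else: fall through to the GRC/MEM handling below */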
725         case TG3_APE_LOCK_GRC:
726         case TG3_APE_LOCK_MEM:
727                 if (!tp->pci_fn)
728                         bit = APE_LOCK_GRANT_DRIVER;
729                 else
730                         bit = 1 << tp->pci_fn;
731                 break;
732         case TG3_APE_LOCK_PHY0:
733         case TG3_APE_LOCK_PHY1:
734         case TG3_APE_LOCK_PHY2:
735         case TG3_APE_LOCK_PHY3:
736                 bit = APE_LOCK_GRANT_DRIVER;
737                 break;
738         default:
739                 return;
740         }
741
742         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
743                 gnt = TG3_APE_LOCK_GRANT;
744         else
745                 gnt = TG3_APE_PER_LOCK_GRANT;
746
747         tg3_ape_write32(tp, gnt + 4 * locknum, bit);
748 }
749
750 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
751 {
752         u32 apedata;
753
754         while (timeout_us) {
755                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
756                         return -EBUSY;
757
758                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
759                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
760                         break;
761
762                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
763
764                 udelay(10);
765                 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
766         }
767
768         return timeout_us ? 0 : -EBUSY;
769 }
770
771 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
772 {
773         u32 i, apedata;
774
775         for (i = 0; i < timeout_us / 10; i++) {
776                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
777
778                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
779                         break;
780
781                 udelay(10);
782         }
783
784         return i == timeout_us / 10;
785 }
786
787 int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len)
788 {
789         int err;
790         u32 i, bufoff, msgoff, maxlen, apedata;
791
792         if (!tg3_flag(tp, APE_HAS_NCSI))
793                 return 0;
794
795         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
796         if (apedata != APE_SEG_SIG_MAGIC)
797                 return -ENODEV;
798
799         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
800         if (!(apedata & APE_FW_STATUS_READY))
801                 return -EAGAIN;
802
803         bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
804                  TG3_APE_SHMEM_BASE;
805         msgoff = bufoff + 2 * sizeof(u32);
806         maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
807
808         while (len) {
809                 u32 length;
810
811                 /* Cap xfer sizes to scratchpad limits. */
812                 length = (len > maxlen) ? maxlen : len;
813                 len -= length;
814
815                 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
816                 if (!(apedata & APE_FW_STATUS_READY))
817                         return -EAGAIN;
818
819                 /* Wait for up to 1 msec for APE to service previous event. */
820                 err = tg3_ape_event_lock(tp, 1000);
821                 if (err)
822                         return err;
823
824                 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
825                           APE_EVENT_STATUS_SCRTCHPD_READ |
826                           APE_EVENT_STATUS_EVENT_PENDING;
827                 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
828
829                 tg3_ape_write32(tp, bufoff, base_off);
830                 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
831
832                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
833                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
834
835                 base_off += length;
836
837                 if (tg3_ape_wait_for_event(tp, 30000))
838                         return -EAGAIN;
839
840                 for (i = 0; length; i += 4, length -= 4) {
841                         u32 val = tg3_ape_read32(tp, msgoff + i);
842                         memcpy(data, &val, sizeof(u32));
843                         data++;
844                 }
845         }
846
847         return 0;
848 }
849
850 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
851 {
852         int err;
853         u32 apedata;
854
855         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
856         if (apedata != APE_SEG_SIG_MAGIC)
857                 return -EAGAIN;
858
859         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
860         if (!(apedata & APE_FW_STATUS_READY))
861                 return -EAGAIN;
862
863         /* Wait for up to 1 millisecond for APE to service previous event. */
864         err = tg3_ape_event_lock(tp, 1000);
865         if (err)
866                 return err;
867
868         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
869                         event | APE_EVENT_STATUS_EVENT_PENDING);
870
871         tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
872         tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
873
874         return 0;
875 }
876
877 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
878 {
879         u32 event;
880         u32 apedata;
881
882         if (!tg3_flag(tp, ENABLE_APE))
883                 return;
884
885         switch (kind) {
886         case RESET_KIND_INIT:
887                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
888                                 APE_HOST_SEG_SIG_MAGIC);
889                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
890                                 APE_HOST_SEG_LEN_MAGIC);
891                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
892                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
893                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
894                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
895                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
896                                 APE_HOST_BEHAV_NO_PHYLOCK);
897                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
898                                     TG3_APE_HOST_DRVR_STATE_START);
899
900                 event = APE_EVENT_STATUS_STATE_START;
901                 break;
902         case RESET_KIND_SHUTDOWN:
903                 /* With the interface we are currently using,
904                  * APE does not track driver state.  Wiping
905                  * out the HOST SEGMENT SIGNATURE forces
906                  * the APE to assume OS absent status.
907                  */
908                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
909
910                 if (device_may_wakeup(&tp->pdev->dev) &&
911                     tg3_flag(tp, WOL_ENABLE)) {
912                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
913                                             TG3_APE_HOST_WOL_SPEED_AUTO);
914                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
915                 } else
916                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
917
918                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
919
920                 event = APE_EVENT_STATUS_STATE_UNLOAD;
921                 break;
922         case RESET_KIND_SUSPEND:
923                 event = APE_EVENT_STATUS_STATE_SUSPEND;
924                 break;
925         default:
926                 return;
927         }
928
929         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
930
931         tg3_ape_send_event(tp, event);
932 }
933
934 static void tg3_disable_ints(struct tg3 *tp)
935 {
936         int i;
937
938         tw32(TG3PCI_MISC_HOST_CTRL,
939              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
940         for (i = 0; i < tp->irq_max; i++)
941                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
942 }
943
944 static void tg3_enable_ints(struct tg3 *tp)
945 {
946         int i;
947
948         tp->irq_sync = 0;
949         wmb();
950
951         tw32(TG3PCI_MISC_HOST_CTRL,
952              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
953
954         tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
955         for (i = 0; i < tp->irq_cnt; i++) {
956                 struct tg3_napi *tnapi = &tp->napi[i];
957
958                 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
959                 if (tg3_flag(tp, 1SHOT_MSI))
960                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
961
962                 tp->coal_now |= tnapi->coal_now;
963         }
964
965         /* Force an initial interrupt */
966         if (!tg3_flag(tp, TAGGED_STATUS) &&
967             (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
968                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
969         else
970                 tw32(HOSTCC_MODE, tp->coal_now);
971
972         tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
973 }
974
975 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
976 {
977         struct tg3 *tp = tnapi->tp;
978         struct tg3_hw_status *sblk = tnapi->hw_status;
979         unsigned int work_exists = 0;
980
981         /* check for phy events */
982         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
983                 if (sblk->status & SD_STATUS_LINK_CHG)
984                         work_exists = 1;
985         }
986
987         /* check for TX work to do */
988         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
989                 work_exists = 1;
990
991         /* check for RX work to do */
992         if (tnapi->rx_rcb_prod_idx &&
993             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
994                 work_exists = 1;
995
996         return work_exists;
997 }
998
999 /* tg3_int_reenable
1000  *  similar to tg3_enable_ints, but it accurately determines whether there
1001  *  is new work pending and can return without flushing the PIO write
1002  *  which reenables interrupts
1003  */
1004 static void tg3_int_reenable(struct tg3_napi *tnapi)
1005 {
1006         struct tg3 *tp = tnapi->tp;
1007
1008         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1009         mmiowb();
1010
1011         /* When doing tagged status, this work check is unnecessary.
1012          * The last_tag we write above tells the chip which piece of
1013          * work we've completed.
1014          */
1015         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1016                 tw32(HOSTCC_MODE, tp->coalesce_mode |
1017                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
1018 }
1019
1020 static void tg3_switch_clocks(struct tg3 *tp)
1021 {
1022         u32 clock_ctrl;
1023         u32 orig_clock_ctrl;
1024
1025         if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1026                 return;
1027
1028         clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1029
1030         orig_clock_ctrl = clock_ctrl;
1031         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1032                        CLOCK_CTRL_CLKRUN_OENABLE |
1033                        0x1f);
1034         tp->pci_clock_ctrl = clock_ctrl;
1035
1036         if (tg3_flag(tp, 5705_PLUS)) {
1037                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1038                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1039                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1040                 }
1041         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1042                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1043                             clock_ctrl |
1044                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1045                             40);
1046                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1047                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
1048                             40);
1049         }
1050         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1051 }
1052
1053 #define PHY_BUSY_LOOPS  5000
1054
1055 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1056 {
1057         u32 frame_val;
1058         unsigned int loops;
1059         int ret;
1060
1061         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1062                 tw32_f(MAC_MI_MODE,
1063                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1064                 udelay(80);
1065         }
1066
1067         tg3_ape_lock(tp, tp->phy_ape_lock);
1068
1069         *val = 0x0;
1070
1071         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1072                       MI_COM_PHY_ADDR_MASK);
1073         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1074                       MI_COM_REG_ADDR_MASK);
1075         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1076
1077         tw32_f(MAC_MI_COM, frame_val);
1078
1079         loops = PHY_BUSY_LOOPS;
1080         while (loops != 0) {
1081                 udelay(10);
1082                 frame_val = tr32(MAC_MI_COM);
1083
1084                 if ((frame_val & MI_COM_BUSY) == 0) {
1085                         udelay(5);
1086                         frame_val = tr32(MAC_MI_COM);
1087                         break;
1088                 }
1089                 loops -= 1;
1090         }
1091
1092         ret = -EBUSY;
1093         if (loops != 0) {
1094                 *val = frame_val & MI_COM_DATA_MASK;
1095                 ret = 0;
1096         }
1097
1098         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1099                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1100                 udelay(80);
1101         }
1102
1103         tg3_ape_unlock(tp, tp->phy_ape_lock);
1104
1105         return ret;
1106 }
1107
1108 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1109 {
1110         u32 frame_val;
1111         unsigned int loops;
1112         int ret;
1113
1114         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1115             (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1116                 return 0;
1117
1118         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1119                 tw32_f(MAC_MI_MODE,
1120                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1121                 udelay(80);
1122         }
1123
1124         tg3_ape_lock(tp, tp->phy_ape_lock);
1125
1126         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1127                       MI_COM_PHY_ADDR_MASK);
1128         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1129                       MI_COM_REG_ADDR_MASK);
1130         frame_val |= (val & MI_COM_DATA_MASK);
1131         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1132
1133         tw32_f(MAC_MI_COM, frame_val);
1134
1135         loops = PHY_BUSY_LOOPS;
1136         while (loops != 0) {
1137                 udelay(10);
1138                 frame_val = tr32(MAC_MI_COM);
1139                 if ((frame_val & MI_COM_BUSY) == 0) {
1140                         udelay(5);
1141                         frame_val = tr32(MAC_MI_COM);
1142                         break;
1143                 }
1144                 loops -= 1;
1145         }
1146
1147         ret = -EBUSY;
1148         if (loops != 0)
1149                 ret = 0;
1150
1151         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1152                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1153                 udelay(80);
1154         }
1155
1156         tg3_ape_unlock(tp, tp->phy_ape_lock);
1157
1158         return ret;
1159 }
1160
1161 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1162 {
1163         int err;
1164
1165         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1166         if (err)
1167                 goto done;
1168
1169         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1170         if (err)
1171                 goto done;
1172
1173         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1174                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1175         if (err)
1176                 goto done;
1177
1178         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1179
1180 done:
1181         return err;
1182 }
1183
1184 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1185 {
1186         int err;
1187
1188         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1189         if (err)
1190                 goto done;
1191
1192         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1193         if (err)
1194                 goto done;
1195
1196         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1197                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1198         if (err)
1199                 goto done;
1200
1201         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1202
1203 done:
1204         return err;
1205 }
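/* Both helpers above reach Clause 45 registers through Clause 22 MDIO
 * frames using the standard indirect MMD sequence: write the device
 * address to MII_TG3_MMD_CTRL, the register to MII_TG3_MMD_ADDRESS, then
 * access the data through MII_TG3_MMD_ADDRESS with the no-post-increment
 * function selected in MII_TG3_MMD_CTRL.
 */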
1206
1207 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1208 {
1209         int err;
1210
1211         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1212         if (!err)
1213                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1214
1215         return err;
1216 }
1217
1218 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1219 {
1220         int err;
1221
1222         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1223         if (!err)
1224                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1225
1226         return err;
1227 }
1228
1229 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1230 {
1231         int err;
1232
1233         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1234                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1235                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1236         if (!err)
1237                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1238
1239         return err;
1240 }
1241
1242 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1243 {
1244         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1245                 set |= MII_TG3_AUXCTL_MISC_WREN;
1246
1247         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1248 }
1249
1250 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1251         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1252                              MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1253                              MII_TG3_AUXCTL_ACTL_TX_6DB)
1254
1255 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1256         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1257                              MII_TG3_AUXCTL_ACTL_TX_6DB)
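/* Typical usage pattern (illustrative sketch): bracket DSP accesses with
 * these helpers, supplying the terminating semicolon at the call site:
 *   err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
 *   if (!err) {
 *           tg3_phydsp_write(tp, reg, val);
 *           TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *   }
 */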
1258
1259 static int tg3_bmcr_reset(struct tg3 *tp)
1260 {
1261         u32 phy_control;
1262         int limit, err;
1263
1264         /* OK, reset it, and poll the BMCR_RESET bit until it
1265          * clears or we time out.
1266          */
1267         phy_control = BMCR_RESET;
1268         err = tg3_writephy(tp, MII_BMCR, phy_control);
1269         if (err != 0)
1270                 return -EBUSY;
1271
1272         limit = 5000;
1273         while (limit--) {
1274                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1275                 if (err != 0)
1276                         return -EBUSY;
1277
1278                 if ((phy_control & BMCR_RESET) == 0) {
1279                         udelay(40);
1280                         break;
1281                 }
1282                 udelay(10);
1283         }
1284         if (limit < 0)
1285                 return -EBUSY;
1286
1287         return 0;
1288 }
1289
1290 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1291 {
1292         struct tg3 *tp = bp->priv;
1293         u32 val;
1294
1295         spin_lock_bh(&tp->lock);
1296
1297         if (tg3_readphy(tp, reg, &val))
1298                 val = -EIO;
1299
1300         spin_unlock_bh(&tp->lock);
1301
1302         return val;
1303 }
1304
1305 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1306 {
1307         struct tg3 *tp = bp->priv;
1308         u32 ret = 0;
1309
1310         spin_lock_bh(&tp->lock);
1311
1312         if (tg3_writephy(tp, reg, val))
1313                 ret = -EIO;
1314
1315         spin_unlock_bh(&tp->lock);
1316
1317         return ret;
1318 }
1319
1320 static int tg3_mdio_reset(struct mii_bus *bp)
1321 {
1322         return 0;
1323 }
1324
1325 static void tg3_mdio_config_5785(struct tg3 *tp)
1326 {
1327         u32 val;
1328         struct phy_device *phydev;
1329
1330         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1331         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1332         case PHY_ID_BCM50610:
1333         case PHY_ID_BCM50610M:
1334                 val = MAC_PHYCFG2_50610_LED_MODES;
1335                 break;
1336         case PHY_ID_BCMAC131:
1337                 val = MAC_PHYCFG2_AC131_LED_MODES;
1338                 break;
1339         case PHY_ID_RTL8211C:
1340                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1341                 break;
1342         case PHY_ID_RTL8201E:
1343                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1344                 break;
1345         default:
1346                 return;
1347         }
1348
1349         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1350                 tw32(MAC_PHYCFG2, val);
1351
1352                 val = tr32(MAC_PHYCFG1);
1353                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1354                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1355                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1356                 tw32(MAC_PHYCFG1, val);
1357
1358                 return;
1359         }
1360
1361         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1362                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1363                        MAC_PHYCFG2_FMODE_MASK_MASK |
1364                        MAC_PHYCFG2_GMODE_MASK_MASK |
1365                        MAC_PHYCFG2_ACT_MASK_MASK   |
1366                        MAC_PHYCFG2_QUAL_MASK_MASK |
1367                        MAC_PHYCFG2_INBAND_ENABLE;
1368
1369         tw32(MAC_PHYCFG2, val);
1370
1371         val = tr32(MAC_PHYCFG1);
1372         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1373                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1374         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1375                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1376                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1377                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1378                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1379         }
1380         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1381                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1382         tw32(MAC_PHYCFG1, val);
1383
1384         val = tr32(MAC_EXT_RGMII_MODE);
1385         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1386                  MAC_RGMII_MODE_RX_QUALITY |
1387                  MAC_RGMII_MODE_RX_ACTIVITY |
1388                  MAC_RGMII_MODE_RX_ENG_DET |
1389                  MAC_RGMII_MODE_TX_ENABLE |
1390                  MAC_RGMII_MODE_TX_LOWPWR |
1391                  MAC_RGMII_MODE_TX_RESET);
1392         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1393                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1394                         val |= MAC_RGMII_MODE_RX_INT_B |
1395                                MAC_RGMII_MODE_RX_QUALITY |
1396                                MAC_RGMII_MODE_RX_ACTIVITY |
1397                                MAC_RGMII_MODE_RX_ENG_DET;
1398                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1399                         val |= MAC_RGMII_MODE_TX_ENABLE |
1400                                MAC_RGMII_MODE_TX_LOWPWR |
1401                                MAC_RGMII_MODE_TX_RESET;
1402         }
1403         tw32(MAC_EXT_RGMII_MODE, val);
1404 }
1405
1406 static void tg3_mdio_start(struct tg3 *tp)
1407 {
1408         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1409         tw32_f(MAC_MI_MODE, tp->mi_mode);
1410         udelay(80);
1411
1412         if (tg3_flag(tp, MDIOBUS_INITED) &&
1413             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1414                 tg3_mdio_config_5785(tp);
1415 }
1416
1417 static int tg3_mdio_init(struct tg3 *tp)
1418 {
1419         int i;
1420         u32 reg;
1421         struct phy_device *phydev;
1422
1423         if (tg3_flag(tp, 5717_PLUS)) {
1424                 u32 is_serdes;
1425
1426                 tp->phy_addr = tp->pci_fn + 1;
1427
1428                 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1429                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1430                 else
1431                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1432                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1433                 if (is_serdes)
1434                         tp->phy_addr += 7;
1435         } else
1436                 tp->phy_addr = TG3_PHY_MII_ADDR;
1437
1438         tg3_mdio_start(tp);
1439
1440         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1441                 return 0;
1442
1443         tp->mdio_bus = mdiobus_alloc();
1444         if (tp->mdio_bus == NULL)
1445                 return -ENOMEM;
1446
1447         tp->mdio_bus->name     = "tg3 mdio bus";
1448         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1449                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1450         tp->mdio_bus->priv     = tp;
1451         tp->mdio_bus->parent   = &tp->pdev->dev;
1452         tp->mdio_bus->read     = &tg3_mdio_read;
1453         tp->mdio_bus->write    = &tg3_mdio_write;
1454         tp->mdio_bus->reset    = &tg3_mdio_reset;
1455         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1456         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1457
1458         for (i = 0; i < PHY_MAX_ADDR; i++)
1459                 tp->mdio_bus->irq[i] = PHY_POLL;
1460
1461         /* The bus registration will look for all the PHYs on the mdio bus.
1462          * Unfortunately, it does not ensure the PHY is powered up before
1463          * accessing the PHY ID registers.  A chip reset is the
1464  * quickest way to bring the device back to an operational state.
1465          */
1466         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1467                 tg3_bmcr_reset(tp);
1468
1469         i = mdiobus_register(tp->mdio_bus);
1470         if (i) {
1471                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1472                 mdiobus_free(tp->mdio_bus);
1473                 return i;
1474         }
1475
1476         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1477
1478         if (!phydev || !phydev->drv) {
1479                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1480                 mdiobus_unregister(tp->mdio_bus);
1481                 mdiobus_free(tp->mdio_bus);
1482                 return -ENODEV;
1483         }
1484
1485         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1486         case PHY_ID_BCM57780:
1487                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1488                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1489                 break;
1490         case PHY_ID_BCM50610:
1491         case PHY_ID_BCM50610M:
1492                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1493                                      PHY_BRCM_RX_REFCLK_UNUSED |
1494                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1495                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1496                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1497                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1498                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1499                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1500                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1501                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1502                 /* fallthru */
1503         case PHY_ID_RTL8211C:
1504                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1505                 break;
1506         case PHY_ID_RTL8201E:
1507         case PHY_ID_BCMAC131:
1508                 phydev->interface = PHY_INTERFACE_MODE_MII;
1509                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1510                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1511                 break;
1512         }
1513
1514         tg3_flag_set(tp, MDIOBUS_INITED);
1515
1516         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1517                 tg3_mdio_config_5785(tp);
1518
1519         return 0;
1520 }
1521
1522 static void tg3_mdio_fini(struct tg3 *tp)
1523 {
1524         if (tg3_flag(tp, MDIOBUS_INITED)) {
1525                 tg3_flag_clear(tp, MDIOBUS_INITED);
1526                 mdiobus_unregister(tp->mdio_bus);
1527                 mdiobus_free(tp->mdio_bus);
1528         }
1529 }
1530
1531 /* tp->lock is held. */
1532 static inline void tg3_generate_fw_event(struct tg3 *tp)
1533 {
1534         u32 val;
1535
1536         val = tr32(GRC_RX_CPU_EVENT);
1537         val |= GRC_RX_CPU_DRIVER_EVENT;
1538         tw32_f(GRC_RX_CPU_EVENT, val);
1539
1540         tp->last_event_jiffies = jiffies;
1541 }
1542
1543 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1544
1545 /* tp->lock is held. */
1546 static void tg3_wait_for_event_ack(struct tg3 *tp)
1547 {
1548         int i;
1549         unsigned int delay_cnt;
1550         long time_remain;
1551
1552         /* If enough time has passed, no wait is necessary. */
1553         time_remain = (long)(tp->last_event_jiffies + 1 +
1554                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1555                       (long)jiffies;
1556         if (time_remain < 0)
1557                 return;
1558
1559         /* Check if we can shorten the wait time. */
1560         delay_cnt = jiffies_to_usecs(time_remain);
1561         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1562                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
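        /* The loop below polls in 8 usec steps; convert the remaining
         * microseconds into a poll count, rounding up.
         */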
1563         delay_cnt = (delay_cnt >> 3) + 1;
1564
1565         for (i = 0; i < delay_cnt; i++) {
1566                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1567                         break;
1568                 udelay(8);
1569         }
1570 }
1571
1572 /* tp->lock is held. */
1573 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1574 {
1575         u32 reg, val;
1576
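        /* Pack pairs of 16-bit MII registers into each 32-bit word:
         * control/advertisement in the high half and the matching
         * status/partner register in the low half.  The last word
         * carries only MII_PHYADDR, in its high half.
         */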
1577         val = 0;
1578         if (!tg3_readphy(tp, MII_BMCR, &reg))
1579                 val = reg << 16;
1580         if (!tg3_readphy(tp, MII_BMSR, &reg))
1581                 val |= (reg & 0xffff);
1582         *data++ = val;
1583
1584         val = 0;
1585         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1586                 val = reg << 16;
1587         if (!tg3_readphy(tp, MII_LPA, &reg))
1588                 val |= (reg & 0xffff);
1589         *data++ = val;
1590
1591         val = 0;
1592         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1593                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1594                         val = reg << 16;
1595                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1596                         val |= (reg & 0xffff);
1597         }
1598         *data++ = val;
1599
1600         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1601                 val = reg << 16;
1602         else
1603                 val = 0;
1604         *data++ = val;
1605 }
1606
1607 /* tp->lock is held. */
1608 static void tg3_ump_link_report(struct tg3 *tp)
1609 {
1610         u32 data[4];
1611
1612         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1613                 return;
1614
1615         tg3_phy_gather_ump_data(tp, data);
1616
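        /* Mailbox handshake with the firmware: wait for the previous
         * event to be acknowledged, write the command, its length, and
         * the payload into SRAM, then raise a driver event to the RX CPU.
         */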
1617         tg3_wait_for_event_ack(tp);
1618
1619         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1620         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1621         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1622         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1623         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1624         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1625
1626         tg3_generate_fw_event(tp);
1627 }
1628
1629 /* tp->lock is held. */
1630 static void tg3_stop_fw(struct tg3 *tp)
1631 {
1632         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1633                 /* Wait for RX cpu to ACK the previous event. */
1634                 tg3_wait_for_event_ack(tp);
1635
1636                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1637
1638                 tg3_generate_fw_event(tp);
1639
1640                 /* Wait for RX cpu to ACK this event. */
1641                 tg3_wait_for_event_ack(tp);
1642         }
1643 }
1644
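/* Tell management firmware what the driver is about to do by writing a
 * driver-state word into the shared SRAM mailbox before the chip reset.
 */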
1645 /* tp->lock is held. */
1646 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1647 {
1648         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1649                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1650
1651         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1652                 switch (kind) {
1653                 case RESET_KIND_INIT:
1654                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1655                                       DRV_STATE_START);
1656                         break;
1657
1658                 case RESET_KIND_SHUTDOWN:
1659                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1660                                       DRV_STATE_UNLOAD);
1661                         break;
1662
1663                 case RESET_KIND_SUSPEND:
1664                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1665                                       DRV_STATE_SUSPEND);
1666                         break;
1667
1668                 default:
1669                         break;
1670                 }
1671         }
1672
1673         if (kind == RESET_KIND_INIT ||
1674             kind == RESET_KIND_SUSPEND)
1675                 tg3_ape_driver_state_change(tp, kind);
1676 }
1677
1678 /* tp->lock is held. */
1679 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1680 {
1681         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1682                 switch (kind) {
1683                 case RESET_KIND_INIT:
1684                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1685                                       DRV_STATE_START_DONE);
1686                         break;
1687
1688                 case RESET_KIND_SHUTDOWN:
1689                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1690                                       DRV_STATE_UNLOAD_DONE);
1691                         break;
1692
1693                 default:
1694                         break;
1695                 }
1696         }
1697
1698         if (kind == RESET_KIND_SHUTDOWN)
1699                 tg3_ape_driver_state_change(tp, kind);
1700 }
1701
1702 /* tp->lock is held. */
1703 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1704 {
1705         if (tg3_flag(tp, ENABLE_ASF)) {
1706                 switch (kind) {
1707                 case RESET_KIND_INIT:
1708                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1709                                       DRV_STATE_START);
1710                         break;
1711
1712                 case RESET_KIND_SHUTDOWN:
1713                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1714                                       DRV_STATE_UNLOAD);
1715                         break;
1716
1717                 case RESET_KIND_SUSPEND:
1718                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1719                                       DRV_STATE_SUSPEND);
1720                         break;
1721
1722                 default:
1723                         break;
1724                 }
1725         }
1726 }
1727
1728 static int tg3_poll_fw(struct tg3 *tp)
1729 {
1730         int i;
1731         u32 val;
1732
1733         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1734                 /* Wait up to 20ms for init done. */
1735                 for (i = 0; i < 200; i++) {
1736                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1737                                 return 0;
1738                         udelay(100);
1739                 }
1740                 return -ENODEV;
1741         }
1742
1743         /* Wait for firmware initialization to complete.  The bootcode
         * acknowledges the magic value written before reset by storing
         * its one's complement back in this mailbox.
         */
1744         for (i = 0; i < 100000; i++) {
1745                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1746                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1747                         break;
1748                 udelay(10);
1749         }
1750
1751         /* Chip might not be fitted with firmware.  Some Sun onboard
1752          * parts are configured like that.  So don't signal the timeout
1753          * of the above loop as an error, but do report the lack of
1754          * running firmware once.
1755          */
1756         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1757                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1758
1759                 netdev_info(tp->dev, "No firmware running\n");
1760         }
1761
1762         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1763                 /* The 57765 A0 needs a little more
1764                  * time to do some important work.
1765                  */
1766                 mdelay(10);
1767         }
1768
1769         return 0;
1770 }
1771
1772 static void tg3_link_report(struct tg3 *tp)
1773 {
1774         if (!netif_carrier_ok(tp->dev)) {
1775                 netif_info(tp, link, tp->dev, "Link is down\n");
1776                 tg3_ump_link_report(tp);
1777         } else if (netif_msg_link(tp)) {
1778                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1779                             (tp->link_config.active_speed == SPEED_1000 ?
1780                              1000 :
1781                              (tp->link_config.active_speed == SPEED_100 ?
1782                               100 : 10)),
1783                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1784                              "full" : "half"));
1785
1786                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1787                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1788                             "on" : "off",
1789                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1790                             "on" : "off");
1791
1792                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1793                         netdev_info(tp->dev, "EEE is %s\n",
1794                                     tp->setlpicnt ? "enabled" : "disabled");
1795
1796                 tg3_ump_link_report(tp);
1797         }
1798 }
1799
1800 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1801 {
1802         u16 miireg;
1803
1804         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1805                 miireg = ADVERTISE_1000XPAUSE;
1806         else if (flow_ctrl & FLOW_CTRL_TX)
1807                 miireg = ADVERTISE_1000XPSE_ASYM;
1808         else if (flow_ctrl & FLOW_CTRL_RX)
1809                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1810         else
1811                 miireg = 0;
1812
1813         return miireg;
1814 }
1815
1816 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1817 {
1818         u8 cap = 0;
1819
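        /* Resolve 1000BASE-X pause as in IEEE 802.3: symmetric pause if
         * both sides advertise PAUSE, otherwise the asymmetric-pause
         * bits decide which direction, if any, is flow controlled.
         */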
1820         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1821                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1822         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1823                 if (lcladv & ADVERTISE_1000XPAUSE)
1824                         cap = FLOW_CTRL_RX;
1825                 if (rmtadv & ADVERTISE_1000XPAUSE)
1826                         cap = FLOW_CTRL_TX;
1827         }
1828
1829         return cap;
1830 }
1831
1832 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1833 {
1834         u8 autoneg;
1835         u8 flowctrl = 0;
1836         u32 old_rx_mode = tp->rx_mode;
1837         u32 old_tx_mode = tp->tx_mode;
1838
1839         if (tg3_flag(tp, USE_PHYLIB))
1840                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1841         else
1842                 autoneg = tp->link_config.autoneg;
1843
1844         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1845                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1846                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1847                 else
1848                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1849         } else
1850                 flowctrl = tp->link_config.flowctrl;
1851
1852         tp->link_config.active_flowctrl = flowctrl;
1853
1854         if (flowctrl & FLOW_CTRL_RX)
1855                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1856         else
1857                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1858
1859         if (old_rx_mode != tp->rx_mode)
1860                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1861
1862         if (flowctrl & FLOW_CTRL_TX)
1863                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1864         else
1865                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1866
1867         if (old_tx_mode != tp->tx_mode)
1868                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1869 }
1870
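/* phylib link-change callback: mirror the PHY's negotiated speed, duplex,
 * and pause settings into the MAC registers, then log any change.
 */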
1871 static void tg3_adjust_link(struct net_device *dev)
1872 {
1873         u8 oldflowctrl, linkmesg = 0;
1874         u32 mac_mode, lcl_adv, rmt_adv;
1875         struct tg3 *tp = netdev_priv(dev);
1876         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1877
1878         spin_lock_bh(&tp->lock);
1879
1880         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1881                                     MAC_MODE_HALF_DUPLEX);
1882
1883         oldflowctrl = tp->link_config.active_flowctrl;
1884
1885         if (phydev->link) {
1886                 lcl_adv = 0;
1887                 rmt_adv = 0;
1888
1889                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1890                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1891                 else if (phydev->speed == SPEED_1000 ||
1892                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1893                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1894                 else
1895                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1896
1897                 if (phydev->duplex == DUPLEX_HALF)
1898                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1899                 else {
1900                         lcl_adv = mii_advertise_flowctrl(
1901                                   tp->link_config.flowctrl);
1902
1903                         if (phydev->pause)
1904                                 rmt_adv = LPA_PAUSE_CAP;
1905                         if (phydev->asym_pause)
1906                                 rmt_adv |= LPA_PAUSE_ASYM;
1907                 }
1908
1909                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1910         } else
1911                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1912
1913         if (mac_mode != tp->mac_mode) {
1914                 tp->mac_mode = mac_mode;
1915                 tw32_f(MAC_MODE, tp->mac_mode);
1916                 udelay(40);
1917         }
1918
1919         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1920                 if (phydev->speed == SPEED_10)
1921                         tw32(MAC_MI_STAT,
1922                              MAC_MI_STAT_10MBPS_MODE |
1923                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1924                 else
1925                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1926         }
1927
1928         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1929                 tw32(MAC_TX_LENGTHS,
1930                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1931                       (6 << TX_LENGTHS_IPG_SHIFT) |
1932                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1933         else
1934                 tw32(MAC_TX_LENGTHS,
1935                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1936                       (6 << TX_LENGTHS_IPG_SHIFT) |
1937                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1938
1939         if (phydev->link != tp->old_link ||
1940             phydev->speed != tp->link_config.active_speed ||
1941             phydev->duplex != tp->link_config.active_duplex ||
1942             oldflowctrl != tp->link_config.active_flowctrl)
1943                 linkmesg = 1;
1944
1945         tp->old_link = phydev->link;
1946         tp->link_config.active_speed = phydev->speed;
1947         tp->link_config.active_duplex = phydev->duplex;
1948
1949         spin_unlock_bh(&tp->lock);
1950
1951         if (linkmesg)
1952                 tg3_link_report(tp);
1953 }
1954
1955 static int tg3_phy_init(struct tg3 *tp)
1956 {
1957         struct phy_device *phydev;
1958
1959         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1960                 return 0;
1961
1962         /* Bring the PHY back to a known state. */
1963         tg3_bmcr_reset(tp);
1964
1965         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1966
1967         /* Attach the MAC to the PHY. */
1968         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1969                              phydev->dev_flags, phydev->interface);
1970         if (IS_ERR(phydev)) {
1971                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1972                 return PTR_ERR(phydev);
1973         }
1974
1975         /* Mask with MAC supported features. */
1976         switch (phydev->interface) {
1977         case PHY_INTERFACE_MODE_GMII:
1978         case PHY_INTERFACE_MODE_RGMII:
1979                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1980                         phydev->supported &= (PHY_GBIT_FEATURES |
1981                                               SUPPORTED_Pause |
1982                                               SUPPORTED_Asym_Pause);
1983                         break;
1984                 }
1985                 /* fallthru */
1986         case PHY_INTERFACE_MODE_MII:
1987                 phydev->supported &= (PHY_BASIC_FEATURES |
1988                                       SUPPORTED_Pause |
1989                                       SUPPORTED_Asym_Pause);
1990                 break;
1991         default:
1992                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1993                 return -EINVAL;
1994         }
1995
1996         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1997
1998         phydev->advertising = phydev->supported;
1999
2000         return 0;
2001 }
2002
2003 static void tg3_phy_start(struct tg3 *tp)
2004 {
2005         struct phy_device *phydev;
2006
2007         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2008                 return;
2009
2010         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2011
2012         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2013                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2014                 phydev->speed = tp->link_config.speed;
2015                 phydev->duplex = tp->link_config.duplex;
2016                 phydev->autoneg = tp->link_config.autoneg;
2017                 phydev->advertising = tp->link_config.advertising;
2018         }
2019
2020         phy_start(phydev);
2021
2022         phy_start_aneg(phydev);
2023 }
2024
2025 static void tg3_phy_stop(struct tg3 *tp)
2026 {
2027         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2028                 return;
2029
2030         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2031 }
2032
2033 static void tg3_phy_fini(struct tg3 *tp)
2034 {
2035         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2036                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2037                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2038         }
2039 }
2040
2041 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2042 {
2043         int err;
2044         u32 val;
2045
2046         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2047                 return 0;
2048
2049         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2050                 /* Cannot do read-modify-write on 5401 */
2051                 err = tg3_phy_auxctl_write(tp,
2052                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2053                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2054                                            0x4c20);
2055                 goto done;
2056         }
2057
2058         err = tg3_phy_auxctl_read(tp,
2059                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2060         if (err)
2061                 return err;
2062
2063         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2064         err = tg3_phy_auxctl_write(tp,
2065                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2066
2067 done:
2068         return err;
2069 }
2070
2071 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2072 {
2073         u32 phytest;
2074
2075         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2076                 u32 phy;
2077
2078                 tg3_writephy(tp, MII_TG3_FET_TEST,
2079                              phytest | MII_TG3_FET_SHADOW_EN);
2080                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2081                         if (enable)
2082                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2083                         else
2084                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2085                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2086                 }
2087                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2088         }
2089 }
2090
2091 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2092 {
2093         u32 reg;
2094
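        /* APD (auto power-down) lets the PHY power down its transceiver
         * when no link partner is detected.
         */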
2095         if (!tg3_flag(tp, 5705_PLUS) ||
2096             (tg3_flag(tp, 5717_PLUS) &&
2097              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2098                 return;
2099
2100         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2101                 tg3_phy_fet_toggle_apd(tp, enable);
2102                 return;
2103         }
2104
2105         reg = MII_TG3_MISC_SHDW_WREN |
2106               MII_TG3_MISC_SHDW_SCR5_SEL |
2107               MII_TG3_MISC_SHDW_SCR5_LPED |
2108               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2109               MII_TG3_MISC_SHDW_SCR5_SDTL |
2110               MII_TG3_MISC_SHDW_SCR5_C125OE;
2111         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2112                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2113
2114         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2115
2116
2117         reg = MII_TG3_MISC_SHDW_WREN |
2118               MII_TG3_MISC_SHDW_APD_SEL |
2119               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2120         if (enable)
2121                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2122
2123         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2124 }
2125
2126 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2127 {
2128         u32 phy;
2129
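        /* Auto-MDIX lets the PHY compensate automatically for a crossed
         * (MDI vs. MDI-X) cable.
         */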
2130         if (!tg3_flag(tp, 5705_PLUS) ||
2131             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2132                 return;
2133
2134         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2135                 u32 ephy;
2136
2137                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2138                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2139
2140                         tg3_writephy(tp, MII_TG3_FET_TEST,
2141                                      ephy | MII_TG3_FET_SHADOW_EN);
2142                         if (!tg3_readphy(tp, reg, &phy)) {
2143                                 if (enable)
2144                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2145                                 else
2146                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2147                                 tg3_writephy(tp, reg, phy);
2148                         }
2149                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2150                 }
2151         } else {
2152                 int ret;
2153
2154                 ret = tg3_phy_auxctl_read(tp,
2155                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2156                 if (!ret) {
2157                         if (enable)
2158                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2159                         else
2160                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2161                         tg3_phy_auxctl_write(tp,
2162                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2163                 }
2164         }
2165 }
2166
2167 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2168 {
2169         int ret;
2170         u32 val;
2171
2172         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2173                 return;
2174
2175         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2176         if (!ret)
2177                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2178                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2179 }
2180
2181 static void tg3_phy_apply_otp(struct tg3 *tp)
2182 {
2183         u32 otp, phy;
2184
2185         if (!tp->phy_otp)
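        /* Unpack the factory-programmed OTP (one-time programmable)
         * tuning values and load them into the PHY's DSP registers.
         */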
2186                 return;
2187
2188         otp = tp->phy_otp;
2189
2190         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2191                 return;
2192
2193         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2194         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2195         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2196
2197         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2198               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2199         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2200
2201         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2202         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2203         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2204
2205         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2206         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2207
2208         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2209         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2210
2211         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2212               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2213         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2214
2215         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2216 }
2217
2218 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2219 {
2220         u32 val;
2221
2222         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2223                 return;
2224
2225         tp->setlpicnt = 0;
2226
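        /* EEE is only worth arming on an autonegotiated full-duplex
         * 100/1000 link whose partner also resolved EEE; setlpicnt is
         * later consumed by the periodic timer before low-power idle is
         * actually enabled.
         */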
2227         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2228             current_link_up == 1 &&
2229             tp->link_config.active_duplex == DUPLEX_FULL &&
2230             (tp->link_config.active_speed == SPEED_100 ||
2231              tp->link_config.active_speed == SPEED_1000)) {
2232                 u32 eeectl;
2233
2234                 if (tp->link_config.active_speed == SPEED_1000)
2235                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2236                 else
2237                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2238
2239                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2240
2241                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2242                                   TG3_CL45_D7_EEERES_STAT, &val);
2243
2244                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2245                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2246                         tp->setlpicnt = 2;
2247         }
2248
2249         if (!tp->setlpicnt) {
2250                 if (current_link_up == 1 &&
2251                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2252                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2253                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2254                 }
2255
2256                 val = tr32(TG3_CPMU_EEE_MODE);
2257                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2258         }
2259 }
2260
2261 static void tg3_phy_eee_enable(struct tg3 *tp)
2262 {
2263         u32 val;
2264
2265         if (tp->link_config.active_speed == SPEED_1000 &&
2266             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2267              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2268              tg3_flag(tp, 57765_CLASS)) &&
2269             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2270                 val = MII_TG3_DSP_TAP26_ALNOKO |
2271                       MII_TG3_DSP_TAP26_RMRXSTO;
2272                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2273                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2274         }
2275
2276         val = tr32(TG3_CPMU_EEE_MODE);
2277         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2278 }
2279
2280 static int tg3_wait_macro_done(struct tg3 *tp)
2281 {
2282         int limit = 100;
2283
2284         while (limit--) {
2285                 u32 tmp32;
2286
2287                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2288                         if ((tmp32 & 0x1000) == 0)
2289                                 break;
2290                 }
2291         }
2292         if (limit < 0)
2293                 return -EBUSY;
2294
2295         return 0;
2296 }
2297
2298 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2299 {
2300         static const u32 test_pat[4][6] = {
2301         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2302         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2303         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2304         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2305         };
2306         int chan;
2307
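        /* Write a known test pattern into each of the four DSP channels,
         * read it back, and request another PHY reset on any mismatch.
         */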
2308         for (chan = 0; chan < 4; chan++) {
2309                 int i;
2310
2311                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2312                              (chan * 0x2000) | 0x0200);
2313                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2314
2315                 for (i = 0; i < 6; i++)
2316                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2317                                      test_pat[chan][i]);
2318
2319                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2320                 if (tg3_wait_macro_done(tp)) {
2321                         *resetp = 1;
2322                         return -EBUSY;
2323                 }
2324
2325                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2326                              (chan * 0x2000) | 0x0200);
2327                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2328                 if (tg3_wait_macro_done(tp)) {
2329                         *resetp = 1;
2330                         return -EBUSY;
2331                 }
2332
2333                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2334                 if (tg3_wait_macro_done(tp)) {
2335                         *resetp = 1;
2336                         return -EBUSY;
2337                 }
2338
2339                 for (i = 0; i < 6; i += 2) {
2340                         u32 low, high;
2341
2342                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2343                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2344                             tg3_wait_macro_done(tp)) {
2345                                 *resetp = 1;
2346                                 return -EBUSY;
2347                         }
2348                         low &= 0x7fff;
2349                         high &= 0x000f;
2350                         if (low != test_pat[chan][i] ||
2351                             high != test_pat[chan][i+1]) {
2352                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2353                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2354                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2355
2356                                 return -EBUSY;
2357                         }
2358                 }
2359         }
2360
2361         return 0;
2362 }
2363
2364 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2365 {
2366         int chan;
2367
2368         for (chan = 0; chan < 4; chan++) {
2369                 int i;
2370
2371                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2372                              (chan * 0x2000) | 0x0200);
2373                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2374                 for (i = 0; i < 6; i++)
2375                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2376                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2377                 if (tg3_wait_macro_done(tp))
2378                         return -EBUSY;
2379         }
2380
2381         return 0;
2382 }
2383
2384 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2385 {
2386         u32 reg32, phy9_orig;
2387         int retries, do_phy_reset, err;
2388
2389         retries = 10;
2390         do_phy_reset = 1;
2391         do {
2392                 if (do_phy_reset) {
2393                         err = tg3_bmcr_reset(tp);
2394                         if (err)
2395                                 return err;
2396                         do_phy_reset = 0;
2397                 }
2398
2399                 /* Disable transmitter and interrupt.  */
2400                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2401                         continue;
2402
2403                 reg32 |= 0x3000;
2404                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2405
2406                 /* Set full-duplex, 1000 Mbps.  */
2407                 tg3_writephy(tp, MII_BMCR,
2408                              BMCR_FULLDPLX | BMCR_SPEED1000);
2409
2410                 /* Set to master mode.  */
2411                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2412                         continue;
2413
2414                 tg3_writephy(tp, MII_CTRL1000,
2415                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2416
2417                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2418                 if (err)
2419                         return err;
2420
2421                 /* Block the PHY control access.  */
2422                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2423
2424                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2425                 if (!err)
2426                         break;
2427         } while (--retries);
2428
2429         err = tg3_phy_reset_chanpat(tp);
2430         if (err)
2431                 return err;
2432
2433         tg3_phydsp_write(tp, 0x8005, 0x0000);
2434
2435         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2436         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2437
2438         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439
2440         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2441
2442         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2443                 reg32 &= ~0x3000;
2444                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2445         } else if (!err)
2446                 err = -EBUSY;
2447
2448         return err;
2449 }
2450
2451 /* Reset the tigon3 PHY unconditionally and reapply the chip- and
2452  * PHY-specific workarounds that the reset wipes out.
2453  */
2454 static int tg3_phy_reset(struct tg3 *tp)
2455 {
2456         u32 val, cpmuctrl;
2457         int err;
2458
2459         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
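                /* Take the internal PHY out of its IDDQ low-power state
                 * before touching it.
                 */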
2460                 val = tr32(GRC_MISC_CFG);
2461                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2462                 udelay(40);
2463         }
2464         err  = tg3_readphy(tp, MII_BMSR, &val);
2465         err |= tg3_readphy(tp, MII_BMSR, &val);
2466         if (err != 0)
2467                 return -EBUSY;
2468
2469         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2470                 netif_carrier_off(tp->dev);
2471                 tg3_link_report(tp);
2472         }
2473
2474         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2476             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2477                 err = tg3_phy_reset_5703_4_5(tp);
2478                 if (err)
2479                         return err;
2480                 goto out;
2481         }
2482
2483         cpmuctrl = 0;
2484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2485             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2486                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2487                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2488                         tw32(TG3_CPMU_CTRL,
2489                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2490         }
2491
2492         err = tg3_bmcr_reset(tp);
2493         if (err)
2494                 return err;
2495
2496         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2497                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2498                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2499
2500                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2501         }
2502
2503         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2504             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2505                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2506                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2507                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2508                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2509                         udelay(40);
2510                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2511                 }
2512         }
2513
2514         if (tg3_flag(tp, 5717_PLUS) &&
2515             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2516                 return 0;
2517
2518         tg3_phy_apply_otp(tp);
2519
2520         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2521                 tg3_phy_toggle_apd(tp, true);
2522         else
2523                 tg3_phy_toggle_apd(tp, false);
2524
2525 out:
2526         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2527             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2528                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2529                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2530                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2531         }
2532
2533         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2534                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2535                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2536         }
2537
2538         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2539                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2540                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2541                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2542                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2543                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2544                 }
2545         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2546                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2547                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2548                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2549                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2550                                 tg3_writephy(tp, MII_TG3_TEST1,
2551                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2552                         } else
2553                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2554
2555                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2556                 }
2557         }
2558
2559         /* Set Extended packet length bit (bit 14) on all chips
2560          * that support jumbo frames.  */
2561         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2562                 /* Cannot do read-modify-write on 5401 */
2563                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2564         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2565                 /* Set bit 14 with read-modify-write to preserve other bits */
2566                 err = tg3_phy_auxctl_read(tp,
2567                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2568                 if (!err)
2569                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2570                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2571         }
2572
2573         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2574          * transmission of jumbo frames.
2575          */
2576         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2577                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2578                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2579                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2580         }
2581
2582         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2583                 /* adjust output voltage */
2584                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2585         }
2586
2587         tg3_phy_toggle_automdix(tp, 1);
2588         tg3_phy_set_wirespeed(tp);
2589         return 0;
2590 }
2591
2592 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2593 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2594 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2595                                           TG3_GPIO_MSG_NEED_VAUX)
2596 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2597         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2598          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2599          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2600          (TG3_GPIO_MSG_DRVR_PRES << 12))
2601
2602 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2603         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2604          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2605          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2606          (TG3_GPIO_MSG_NEED_VAUX << 12))
2607
2608 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2609 {
2610         u32 status, shift;
2611
2612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2613             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2614                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2615         else
2616                 status = tr32(TG3_CPMU_DRV_STATUS);
2617
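        /* Each of the four PCI functions owns its own slot, spaced four
         * bits apart, in the shared status word.
         */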
2618         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2619         status &= ~(TG3_GPIO_MSG_MASK << shift);
2620         status |= (newstat << shift);
2621
2622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2623             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2624                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2625         else
2626                 tw32(TG3_CPMU_DRV_STATUS, status);
2627
2628         return status >> TG3_APE_GPIO_MSG_SHIFT;
2629 }
2630
2631 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2632 {
2633         if (!tg3_flag(tp, IS_NIC))
2634                 return 0;
2635
2636         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2637             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2638             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2639                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2640                         return -EIO;
2641
2642                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2643
2644                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2645                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2646
2647                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2648         } else {
2649                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2650                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2651         }
2652
2653         return 0;
2654 }
2655
2656 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2657 {
2658         u32 grc_local_ctrl;
2659
2660         if (!tg3_flag(tp, IS_NIC) ||
2661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2662             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2663                 return;
2664
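        /* GPIO1 drives the power switch: pulse it high, low, then high
         * again to hand the NIC back to the main (vmain) supply.
         */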
2665         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2666
2667         tw32_wait_f(GRC_LOCAL_CTRL,
2668                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2669                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2670
2671         tw32_wait_f(GRC_LOCAL_CTRL,
2672                     grc_local_ctrl,
2673                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2674
2675         tw32_wait_f(GRC_LOCAL_CTRL,
2676                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2677                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2678 }
2679
2680 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2681 {
2682         if (!tg3_flag(tp, IS_NIC))
2683                 return;
2684
2685         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2687                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2688                             (GRC_LCLCTRL_GPIO_OE0 |
2689                              GRC_LCLCTRL_GPIO_OE1 |
2690                              GRC_LCLCTRL_GPIO_OE2 |
2691                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2692                              GRC_LCLCTRL_GPIO_OUTPUT1),
2693                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2694         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2695                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2696                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2697                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2698                                      GRC_LCLCTRL_GPIO_OE1 |
2699                                      GRC_LCLCTRL_GPIO_OE2 |
2700                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2701                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2702                                      tp->grc_local_ctrl;
2703                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2704                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2705
2706                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2707                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2708                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2709
2710                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2711                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2712                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2713         } else {
2714                 u32 no_gpio2;
2715                 u32 grc_local_ctrl = 0;
2716
2717                 /* Workaround to avoid drawing too much current. */
2718                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2719                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2720                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2721                                     grc_local_ctrl,
2722                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2723                 }
2724
2725                 /* On 5753 and variants, GPIO2 cannot be used. */
2726                 no_gpio2 = tp->nic_sram_data_cfg &
2727                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2728
2729                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2730                                   GRC_LCLCTRL_GPIO_OE1 |
2731                                   GRC_LCLCTRL_GPIO_OE2 |
2732                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2733                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2734                 if (no_gpio2) {
2735                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2736                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2737                 }
2738                 tw32_wait_f(GRC_LOCAL_CTRL,
2739                             tp->grc_local_ctrl | grc_local_ctrl,
2740                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2741
2742                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2743
2744                 tw32_wait_f(GRC_LOCAL_CTRL,
2745                             tp->grc_local_ctrl | grc_local_ctrl,
2746                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2747
2748                 if (!no_gpio2) {
2749                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2750                         tw32_wait_f(GRC_LOCAL_CTRL,
2751                                     tp->grc_local_ctrl | grc_local_ctrl,
2752                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2753                 }
2754         }
2755 }
2756
2757 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2758 {
2759         u32 msg = 0;
2760
2761         /* Serialize power state transitions */
2762         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2763                 return;
2764
2765         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2766                 msg = TG3_GPIO_MSG_NEED_VAUX;
2767
2768         msg = tg3_set_function_status(tp, msg);
2769
2770         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2771                 goto done;
2772
2773         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2774                 tg3_pwrsrc_switch_to_vaux(tp);
2775         else
2776                 tg3_pwrsrc_die_with_vmain(tp);
2777
2778 done:
2779         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2780 }
2781
2782 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2783 {
2784         bool need_vaux = false;
2785
2786         /* The GPIOs do something completely different on 57765. */
2787         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2788                 return;
2789
2790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2792             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2793                 tg3_frob_aux_power_5717(tp, include_wol ?
2794                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2795                 return;
2796         }
2797
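        /* On dual-port devices both functions share power control, so
         * the peer's WoL/ASF requirements must be honored as well.
         */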
2798         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2799                 struct net_device *dev_peer;
2800
2801                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2802
2803                 /* remove_one() may have been run on the peer. */
2804                 if (dev_peer) {
2805                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2806
2807                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2808                                 return;
2809
2810                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2811                             tg3_flag(tp_peer, ENABLE_ASF))
2812                                 need_vaux = true;
2813                 }
2814         }
2815
2816         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2817             tg3_flag(tp, ENABLE_ASF))
2818                 need_vaux = true;
2819
2820         if (need_vaux)
2821                 tg3_pwrsrc_switch_to_vaux(tp);
2822         else
2823                 tg3_pwrsrc_die_with_vmain(tp);
2824 }
2825
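/* Decide whether the MAC's link-polarity bit should be set for the given
 * speed: PHY_2 LED mode always inverts, the BCM5411 inverts at every
 * speed except 10 Mbps, and other PHYs invert only at 10 Mbps.
 */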
2826 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2827 {
2828         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2829                 return 1;
2830         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2831                 if (speed != SPEED_10)
2832                         return 1;
2833         } else if (speed == SPEED_10)
2834                 return 1;
2835
2836         return 0;
2837 }
2838
2839 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2840 {
2841         u32 val;
2842
2843         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2844                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2845                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2846                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2847
2848                         sg_dig_ctrl |=
2849                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2850                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2851                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2852                 }
2853                 return;
2854         }
2855
2856         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2857                 tg3_bmcr_reset(tp);
2858                 val = tr32(GRC_MISC_CFG);
2859                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2860                 udelay(40);
2861                 return;
2862         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2863                 u32 phytest;
2864                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2865                         u32 phy;
2866
2867                         tg3_writephy(tp, MII_ADVERTISE, 0);
2868                         tg3_writephy(tp, MII_BMCR,
2869                                      BMCR_ANENABLE | BMCR_ANRESTART);
2870
2871                         tg3_writephy(tp, MII_TG3_FET_TEST,
2872                                      phytest | MII_TG3_FET_SHADOW_EN);
2873                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2874                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2875                                 tg3_writephy(tp,
2876                                              MII_TG3_FET_SHDW_AUXMODE4,
2877                                              phy);
2878                         }
2879                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2880                 }
2881                 return;
2882         } else if (do_low_power) {
2883                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2884                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2885
2886                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2887                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2888                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2889                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2890         }
2891
2892         /* The PHY should not be powered down on some chips because
2893          * of bugs.
2894          */
2895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2896             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2897             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2898              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2899             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2900              !tp->pci_fn))
2901                 return;
2902
2903         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2904             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2905                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2906                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2907                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2908                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2909         }
2910
2911         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2912 }
2913
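     /* NVRAM software arbitration, as used below: request the interface
      * by setting SWARB_REQ_SET1, then poll for the SWARB_GNT1 grant
      * (8000 polls x 20 us, roughly a 160 ms budget).  The lock nests
      * via nvram_lock_cnt, so tg3_nvram_lock()/tg3_nvram_unlock() pairs
      * may be stacked.
      */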
2914 /* tp->lock is held. */
2915 static int tg3_nvram_lock(struct tg3 *tp)
2916 {
2917         if (tg3_flag(tp, NVRAM)) {
2918                 int i;
2919
2920                 if (tp->nvram_lock_cnt == 0) {
2921                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2922                         for (i = 0; i < 8000; i++) {
2923                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2924                                         break;
2925                                 udelay(20);
2926                         }
2927                         if (i == 8000) {
2928                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2929                                 return -ENODEV;
2930                         }
2931                 }
2932                 tp->nvram_lock_cnt++;
2933         }
2934         return 0;
2935 }
2936
2937 /* tp->lock is held. */
2938 static void tg3_nvram_unlock(struct tg3 *tp)
2939 {
2940         if (tg3_flag(tp, NVRAM)) {
2941                 if (tp->nvram_lock_cnt > 0)
2942                         tp->nvram_lock_cnt--;
2943                 if (tp->nvram_lock_cnt == 0)
2944                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2945         }
2946 }
2947
2948 /* tp->lock is held. */
2949 static void tg3_enable_nvram_access(struct tg3 *tp)
2950 {
2951         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2952                 u32 nvaccess = tr32(NVRAM_ACCESS);
2953
2954                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2955         }
2956 }
2957
2958 /* tp->lock is held. */
2959 static void tg3_disable_nvram_access(struct tg3 *tp)
2960 {
2961         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2962                 u32 nvaccess = tr32(NVRAM_ACCESS);
2963
2964                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2965         }
2966 }
2967
2968 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2969                                         u32 offset, u32 *val)
2970 {
2971         u32 tmp;
2972         int i;
2973
2974         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2975                 return -EINVAL;
2976
2977         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2978                                         EEPROM_ADDR_DEVID_MASK |
2979                                         EEPROM_ADDR_READ);
2980         tw32(GRC_EEPROM_ADDR,
2981              tmp |
2982              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2983              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2984               EEPROM_ADDR_ADDR_MASK) |
2985              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2986
2987         for (i = 0; i < 1000; i++) {
2988                 tmp = tr32(GRC_EEPROM_ADDR);
2989
2990                 if (tmp & EEPROM_ADDR_COMPLETE)
2991                         break;
2992                 msleep(1);
2993         }
2994         if (!(tmp & EEPROM_ADDR_COMPLETE))
2995                 return -EBUSY;
2996
2997         tmp = tr32(GRC_EEPROM_DATA);
2998
2999         /*
3000          * The data will always be opposite the native endian
3001          * format.  Perform a blind byteswap to compensate.
3002          */
3003         *val = swab32(tmp);
3004
3005         return 0;
3006 }
3007
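     /* Each poll in tg3_nvram_exec_cmd() below waits 10 us, so
      * NVRAM_CMD_TIMEOUT bounds a single command at roughly 100 ms.
      */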
3008 #define NVRAM_CMD_TIMEOUT 10000
3009
3010 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3011 {
3012         int i;
3013
3014         tw32(NVRAM_CMD, nvram_cmd);
3015         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3016                 udelay(10);
3017                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3018                         udelay(10);
3019                         break;
3020                 }
3021         }
3022
3023         if (i == NVRAM_CMD_TIMEOUT)
3024                 return -EBUSY;
3025
3026         return 0;
3027 }
3028
3029 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3030 {
3031         if (tg3_flag(tp, NVRAM) &&
3032             tg3_flag(tp, NVRAM_BUFFERED) &&
3033             tg3_flag(tp, FLASH) &&
3034             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3035             (tp->nvram_jedecnum == JEDEC_ATMEL))
3036
3037                 addr = ((addr / tp->nvram_pagesize) <<
3038                         ATMEL_AT45DB0X1B_PAGE_POS) +
3039                        (addr % tp->nvram_pagesize);
3040
3041         return addr;
3042 }
3043
3044 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3045 {
3046         if (tg3_flag(tp, NVRAM) &&
3047             tg3_flag(tp, NVRAM_BUFFERED) &&
3048             tg3_flag(tp, FLASH) &&
3049             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3050             (tp->nvram_jedecnum == JEDEC_ATMEL))
3051
3052                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3053                         tp->nvram_pagesize) +
3054                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3055
3056         return addr;
3057 }
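     /* Worked example, assuming the usual AT45DB0x1B geometry of
      * 264-byte pages with ATMEL_AT45DB0X1B_PAGE_POS == 9: linear
      * address 1000 is page 3, byte 208, so tg3_nvram_phys_addr()
      * returns (3 << 9) + 208 = 1744; tg3_nvram_logical_addr()
      * inverts it: (1744 >> 9) * 264 + (1744 & 511) = 1000.
      */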
3058
3059 /* NOTE: Data read in from NVRAM is byteswapped according to
3060  * the byteswapping settings for all other register accesses.
3061  * tg3 devices are BE devices, so on a BE machine, the data
3062  * returned will be exactly as it is seen in NVRAM.  On a LE
3063  * machine, the 32-bit value will be byteswapped.
3064  */
3065 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3066 {
3067         int ret;
3068
3069         if (!tg3_flag(tp, NVRAM))
3070                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3071
3072         offset = tg3_nvram_phys_addr(tp, offset);
3073
3074         if (offset > NVRAM_ADDR_MSK)
3075                 return -EINVAL;
3076
3077         ret = tg3_nvram_lock(tp);
3078         if (ret)
3079                 return ret;
3080
3081         tg3_enable_nvram_access(tp);
3082
3083         tw32(NVRAM_ADDR, offset);
3084         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3085                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3086
3087         if (ret == 0)
3088                 *val = tr32(NVRAM_RDDATA);
3089
3090         tg3_disable_nvram_access(tp);
3091
3092         tg3_nvram_unlock(tp);
3093
3094         return ret;
3095 }
3096
3097 /* Ensures NVRAM data is in bytestream format. */
3098 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3099 {
3100         u32 v;
3101         int res = tg3_nvram_read(tp, offset, &v);
3102         if (!res)
3103                 *val = cpu_to_be32(v);
3104         return res;
3105 }
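     /* Usage sketch (hypothetical offsets): because the result is a
      * byte stream, consecutive reads fill a buffer in NVRAM byte
      * order regardless of host endianness:
      *
      *     __be32 buf[2];
      *
      *     if (!tg3_nvram_read_be32(tp, 0x7c, &buf[0]) &&
      *         !tg3_nvram_read_be32(tp, 0x80, &buf[1]))
      *             memcpy(addr, buf, 6);   (e.g. a MAC stored at 0x7c)
      */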
3106
3107 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3108                                     u32 offset, u32 len, u8 *buf)
3109 {
3110         int i, j, rc = 0;
3111         u32 val;
3112
3113         for (i = 0; i < len; i += 4) {
3114                 u32 addr;
3115                 __be32 data;
3116
3117                 addr = offset + i;
3118
3119                 memcpy(&data, buf + i, 4);
3120
3121                 /*
3122                  * The SEEPROM interface expects the data to always be opposite
3123                  * the native endian format.  We accomplish this by reversing
3124                  * all the operations that would have been performed on the
3125                  * data from a call to tg3_nvram_read_be32().
3126                  */
3127                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3128
3129                 val = tr32(GRC_EEPROM_ADDR);
3130                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3131
3132                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3133                         EEPROM_ADDR_READ);
3134                 tw32(GRC_EEPROM_ADDR, val |
3135                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3136                         (addr & EEPROM_ADDR_ADDR_MASK) |
3137                         EEPROM_ADDR_START |
3138                         EEPROM_ADDR_WRITE);
3139
3140                 for (j = 0; j < 1000; j++) {
3141                         val = tr32(GRC_EEPROM_ADDR);
3142
3143                         if (val & EEPROM_ADDR_COMPLETE)
3144                                 break;
3145                         msleep(1);
3146                 }
3147                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3148                         rc = -EBUSY;
3149                         break;
3150                 }
3151         }
3152
3153         return rc;
3154 }
3155
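     /* Unbuffered (page-erase) flash write: each page is handled as a
      * read-modify-write cycle -- read the whole page into a scratch
      * buffer, merge the caller's data, issue WREN + ERASE, then WREN
      * again and program the page word by word (FIRST .. LAST),
      * finishing with a WRDI once all pages are done.
      */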
3156 /* offset and length are dword aligned */
3157 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3158                 u8 *buf)
3159 {
3160         int ret = 0;
3161         u32 pagesize = tp->nvram_pagesize;
3162         u32 pagemask = pagesize - 1;
3163         u32 nvram_cmd;
3164         u8 *tmp;
3165
3166         tmp = kmalloc(pagesize, GFP_KERNEL);
3167         if (tmp == NULL)
3168                 return -ENOMEM;
3169
3170         while (len) {
3171                 int j;
3172                 u32 phy_addr, page_off, size;
3173
3174                 phy_addr = offset & ~pagemask;
3175
3176                 for (j = 0; j < pagesize; j += 4) {
3177                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3178                                                   (__be32 *) (tmp + j));
3179                         if (ret)
3180                                 break;
3181                 }
3182                 if (ret)
3183                         break;
3184
3185                 page_off = offset & pagemask;
                     /* Clamp to the remainder of this page; staging a full
                      * pagesize at tmp + page_off would overrun tmp when
                      * offset is not page aligned.
                      */
3186                 size = pagesize - page_off;
3187                 if (len < size)
3188                         size = len;
3189
3190                 len -= size;
3191
3192                 memcpy(tmp + page_off, buf, size);
                     /* Advance past the chunk just staged; without this,
                      * writes spanning multiple pages would repeat the
                      * first chunk.
                      */
                     buf += size;
3193
3194                 offset = offset + (pagesize - page_off);
3195
3196                 tg3_enable_nvram_access(tp);
3197
3198                 /*
3199                  * Before we can erase the flash page, we need
3200                  * to issue a special "write enable" command.
3201                  */
3202                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3203
3204                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3205                         break;
3206
3207                 /* Erase the target page */
3208                 tw32(NVRAM_ADDR, phy_addr);
3209
3210                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3211                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3212
3213                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3214                         break;
3215
3216                 /* Issue another write enable to start the write. */
3217                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3218
3219                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3220                         break;
3221
3222                 for (j = 0; j < pagesize; j += 4) {
3223                         __be32 data;
3224
3225                         data = *((__be32 *) (tmp + j));
3226
3227                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3228
3229                         tw32(NVRAM_ADDR, phy_addr + j);
3230
3231                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3232                                 NVRAM_CMD_WR;
3233
3234                         if (j == 0)
3235                                 nvram_cmd |= NVRAM_CMD_FIRST;
3236                         else if (j == (pagesize - 4))
3237                                 nvram_cmd |= NVRAM_CMD_LAST;
3238
3239                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3240                         if (ret)
3241                                 break;
3242                 }
3243                 if (ret)
3244                         break;
3245         }
3246
3247         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3248         tg3_nvram_exec_cmd(tp, nvram_cmd);
3249
3250         kfree(tmp);
3251
3252         return ret;
3253 }
3254
3255 /* offset and length are dword aligned */
3256 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3257                 u8 *buf)
3258 {
3259         int i, ret = 0;
3260
3261         for (i = 0; i < len; i += 4, offset += 4) {
3262                 u32 page_off, phy_addr, nvram_cmd;
3263                 __be32 data;
3264
3265                 memcpy(&data, buf + i, 4);
3266                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3267
3268                 page_off = offset % tp->nvram_pagesize;
3269
3270                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3271
3272                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3273
3274                 if (page_off == 0 || i == 0)
3275                         nvram_cmd |= NVRAM_CMD_FIRST;
3276                 if (page_off == (tp->nvram_pagesize - 4))
3277                         nvram_cmd |= NVRAM_CMD_LAST;
3278
3279                 if (i == (len - 4))
3280                         nvram_cmd |= NVRAM_CMD_LAST;
3281
3282                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3283                     !tg3_flag(tp, FLASH) ||
3284                     !tg3_flag(tp, 57765_PLUS))
3285                         tw32(NVRAM_ADDR, phy_addr);
3286
3287                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3288                     !tg3_flag(tp, 5755_PLUS) &&
3289                     (tp->nvram_jedecnum == JEDEC_ST) &&
3290                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3291                         u32 cmd;
3292
3293                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3294                         ret = tg3_nvram_exec_cmd(tp, cmd);
3295                         if (ret)
3296                                 break;
3297                 }
3298                 if (!tg3_flag(tp, FLASH)) {
3299                         /* We always do complete word writes to eeprom. */
3300                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3301                 }
3302
3303                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3304                 if (ret)
3305                         break;
3306         }
3307         return ret;
3308 }
3309
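     /* Top-level NVRAM write: drops the GPIO write protection for the
      * duration, then dispatches either to the legacy SEEPROM path or,
      * for real NVRAM parts, to the buffered/unbuffered flash paths
      * under the NVRAM lock with GRC_MODE_NVRAM_WR_ENABLE set.
      */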
3310 /* offset and length are dword aligned */
3311 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3312 {
3313         int ret;
3314
3315         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3316                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3317                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3318                 udelay(40);
3319         }
3320
3321         if (!tg3_flag(tp, NVRAM)) {
3322                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3323         } else {
3324                 u32 grc_mode;
3325
3326                 ret = tg3_nvram_lock(tp);
3327                 if (ret)
3328                         return ret;
3329
3330                 tg3_enable_nvram_access(tp);
3331                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3332                         tw32(NVRAM_WRITE1, 0x406);
3333
3334                 grc_mode = tr32(GRC_MODE);
3335                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3336
3337                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3338                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3339                                 buf);
3340                 } else {
3341                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3342                                 buf);
3343                 }
3344
3345                 grc_mode = tr32(GRC_MODE);
3346                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3347
3348                 tg3_disable_nvram_access(tp);
3349                 tg3_nvram_unlock(tp);
3350         }
3351
3352         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3353                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3354                 udelay(40);
3355         }
3356
3357         return ret;
3358 }
3359
3360 #define RX_CPU_SCRATCH_BASE     0x30000
3361 #define RX_CPU_SCRATCH_SIZE     0x04000
3362 #define TX_CPU_SCRATCH_BASE     0x34000
3363 #define TX_CPU_SCRATCH_SIZE     0x04000
3364
3365 /* tp->lock is held. */
3366 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3367 {
3368         int i;
3369
3370         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3371
3372         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3373                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3374
3375                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3376                 return 0;
3377         }
3378         if (offset == RX_CPU_BASE) {
3379                 for (i = 0; i < 10000; i++) {
3380                         tw32(offset + CPU_STATE, 0xffffffff);
3381                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3382                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3383                                 break;
3384                 }
3385
3386                 tw32(offset + CPU_STATE, 0xffffffff);
3387                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3388                 udelay(10);
3389         } else {
3390                 for (i = 0; i < 10000; i++) {
3391                         tw32(offset + CPU_STATE, 0xffffffff);
3392                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3393                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3394                                 break;
3395                 }
3396         }
3397
3398         if (i >= 10000) {
3399                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3400                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3401                 return -ENODEV;
3402         }
3403
3404         /* Clear firmware's nvram arbitration. */
3405         if (tg3_flag(tp, NVRAM))
3406                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3407         return 0;
3408 }
3409
3410 struct fw_info {
3411         unsigned int fw_base;
3412         unsigned int fw_len;
3413         const __be32 *fw_data;
3414 };
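     /* Blob layout, per the comments in the loaders below: word 0
      * holds version numbers, word 1 the load/start address, word 2 a
      * length (recomputed below from the file size instead), and the
      * image proper begins at word 3.
      */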
3415
3416 /* tp->lock is held. */
3417 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3418                                  u32 cpu_scratch_base, int cpu_scratch_size,
3419                                  struct fw_info *info)
3420 {
3421         int err, lock_err, i;
3422         void (*write_op)(struct tg3 *, u32, u32);
3423
3424         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3425                 netdev_err(tp->dev,
3426                            "%s: attempt to load TX cpu firmware on a 5705-plus chip\n",
3427                            __func__);
3428                 return -EINVAL;
3429         }
3430
3431         if (tg3_flag(tp, 5705_PLUS))
3432                 write_op = tg3_write_mem;
3433         else
3434                 write_op = tg3_write_indirect_reg32;
3435
3436         /* It is possible that bootcode is still loading at this point.
3437          * Grab the nvram lock before halting the cpu.
3438          */
3439         lock_err = tg3_nvram_lock(tp);
3440         err = tg3_halt_cpu(tp, cpu_base);
3441         if (!lock_err)
3442                 tg3_nvram_unlock(tp);
3443         if (err)
3444                 goto out;
3445
3446         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3447                 write_op(tp, cpu_scratch_base + i, 0);
3448         tw32(cpu_base + CPU_STATE, 0xffffffff);
3449         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3450         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3451                 write_op(tp, (cpu_scratch_base +
3452                               (info->fw_base & 0xffff) +
3453                               (i * sizeof(u32))),
3454                               be32_to_cpu(info->fw_data[i]));
3455
3456         err = 0;
3457
3458 out:
3459         return err;
3460 }
3461
3462 /* tp->lock is held. */
3463 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3464 {
3465         struct fw_info info;
3466         const __be32 *fw_data;
3467         int err, i;
3468
3469         fw_data = (void *)tp->fw->data;
3470
3471         /* Firmware blob starts with version numbers, followed by
3472          * start address and length.  We are setting complete length:
3473          * length = end_address_of_bss - start_address_of_text.
3474          * Remainder is the blob to be loaded contiguously
3475          * from start address. */
3476
3477         info.fw_base = be32_to_cpu(fw_data[1]);
3478         info.fw_len = tp->fw->size - 12;
3479         info.fw_data = &fw_data[3];
3480
3481         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3482                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3483                                     &info);
3484         if (err)
3485                 return err;
3486
3487         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3488                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3489                                     &info);
3490         if (err)
3491                 return err;
3492
3493         /* Now start up only the RX cpu. */
3494         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3495         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3496
3497         for (i = 0; i < 5; i++) {
3498                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3499                         break;
3500                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3501                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3502                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3503                 udelay(1000);
3504         }
3505         if (i >= 5) {
3506                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3507                            "should be %08x\n", __func__,
3508                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3509                 return -ENODEV;
3510         }
3511         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3512         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3513
3514         return 0;
3515 }
3516
3517 /* tp->lock is held. */
3518 static int tg3_load_tso_firmware(struct tg3 *tp)
3519 {
3520         struct fw_info info;
3521         const __be32 *fw_data;
3522         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3523         int err, i;
3524
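             /* Chips with any hardware TSO variant need no TSO firmware. */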
3525         if (tg3_flag(tp, HW_TSO_1) ||
3526             tg3_flag(tp, HW_TSO_2) ||
3527             tg3_flag(tp, HW_TSO_3))
3528                 return 0;
3529
3530         fw_data = (void *)tp->fw->data;
3531
3532         /* Firmware blob starts with version numbers, followed by
3533          * start address and length.  We are setting complete length:
3534          * length = end_address_of_bss - start_address_of_text.
3535          * Remainder is the blob to be loaded contiguously
3536          * from start address. */
3537
3538         info.fw_base = be32_to_cpu(fw_data[1]);
3539         cpu_scratch_size = tp->fw_len;
3540         info.fw_len = tp->fw->size - 12;
3541         info.fw_data = &fw_data[3];
3542
3543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3544                 cpu_base = RX_CPU_BASE;
3545                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3546         } else {
3547                 cpu_base = TX_CPU_BASE;
3548                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3549                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3550         }
3551
3552         err = tg3_load_firmware_cpu(tp, cpu_base,
3553                                     cpu_scratch_base, cpu_scratch_size,
3554                                     &info);
3555         if (err)
3556                 return err;
3557
3558         /* Now start up the cpu. */
3559         tw32(cpu_base + CPU_STATE, 0xffffffff);
3560         tw32_f(cpu_base + CPU_PC, info.fw_base);
3561
3562         for (i = 0; i < 5; i++) {
3563                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3564                         break;
3565                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3566                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3567                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3568                 udelay(1000);
3569         }
3570         if (i >= 5) {
3571                 netdev_err(tp->dev,
3572                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3573                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3574                 return -ENODEV;
3575         }
3576         tw32(cpu_base + CPU_STATE, 0xffffffff);
3577         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3578         return 0;
3579 }
3580
3581
3582 /* tp->lock is held. */
3583 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3584 {
3585         u32 addr_high, addr_low;
3586         int i;
3587
3588         addr_high = ((tp->dev->dev_addr[0] << 8) |
3589                      tp->dev->dev_addr[1]);
3590         addr_low = ((tp->dev->dev_addr[2] << 24) |
3591                     (tp->dev->dev_addr[3] << 16) |
3592                     (tp->dev->dev_addr[4] <<  8) |
3593                     (tp->dev->dev_addr[5] <<  0));
3594         for (i = 0; i < 4; i++) {
3595                 if (i == 1 && skip_mac_1)
3596                         continue;
3597                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3598                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3599         }
3600
3601         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3602             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3603                 for (i = 0; i < 12; i++) {
3604                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3605                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3606                 }
3607         }
3608
3609         addr_high = (tp->dev->dev_addr[0] +
3610                      tp->dev->dev_addr[1] +
3611                      tp->dev->dev_addr[2] +
3612                      tp->dev->dev_addr[3] +
3613                      tp->dev->dev_addr[4] +
3614                      tp->dev->dev_addr[5]) &
3615                 TX_BACKOFF_SEED_MASK;
3616         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3617 }
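     /* Worked example (hypothetical address 00:10:18:aa:bb:cc):
      * addr_high becomes 0x00000010 (bytes 0-1) and addr_low
      * 0x18aabbcc (bytes 2-5); the same pair is written to all four
      * MAC_ADDR slots, and the byte sum seeds the TX backoff.
      */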
3618
3619 static void tg3_enable_register_access(struct tg3 *tp)
3620 {
3621         /*
3622          * Make sure register accesses (indirect or otherwise) will function
3623          * correctly.
3624          */
3625         pci_write_config_dword(tp->pdev,
3626                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3627 }
3628
3629 static int tg3_power_up(struct tg3 *tp)
3630 {
3631         int err;
3632
3633         tg3_enable_register_access(tp);
3634
3635         err = pci_set_power_state(tp->pdev, PCI_D0);
3636         if (!err) {
3637                 /* Switch out of Vaux if it is a NIC */
3638                 tg3_pwrsrc_switch_to_vmain(tp);
3639         } else {
3640                 netdev_err(tp->dev, "Transition to D0 failed\n");
3641         }
3642
3643         return err;
3644 }
3645
3646 static int tg3_setup_phy(struct tg3 *, int);
3647
3648 static int tg3_power_down_prepare(struct tg3 *tp)
3649 {
3650         u32 misc_host_ctrl;
3651         bool device_should_wake, do_low_power;
3652
3653         tg3_enable_register_access(tp);
3654
3655         /* Restore the CLKREQ setting. */
3656         if (tg3_flag(tp, CLKREQ_BUG)) {
3657                 u16 lnkctl;
3658
3659                 pci_read_config_word(tp->pdev,
3660                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3661                                      &lnkctl);
3662                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3663                 pci_write_config_word(tp->pdev,
3664                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3665                                       lnkctl);
3666         }
3667
3668         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3669         tw32(TG3PCI_MISC_HOST_CTRL,
3670              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3671
3672         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3673                              tg3_flag(tp, WOL_ENABLE);
3674
3675         if (tg3_flag(tp, USE_PHYLIB)) {
3676                 do_low_power = false;
3677                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3678                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3679                         struct phy_device *phydev;
3680                         u32 phyid, advertising;
3681
3682                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3683
3684                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3685
3686                         tp->link_config.speed = phydev->speed;
3687                         tp->link_config.duplex = phydev->duplex;
3688                         tp->link_config.autoneg = phydev->autoneg;
3689                         tp->link_config.advertising = phydev->advertising;
3690
3691                         advertising = ADVERTISED_TP |
3692                                       ADVERTISED_Pause |
3693                                       ADVERTISED_Autoneg |
3694                                       ADVERTISED_10baseT_Half;
3695
3696                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3697                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3698                                         advertising |=
3699                                                 ADVERTISED_100baseT_Half |
3700                                                 ADVERTISED_100baseT_Full |
3701                                                 ADVERTISED_10baseT_Full;
3702                                 else
3703                                         advertising |= ADVERTISED_10baseT_Full;
3704                         }
3705
3706                         phydev->advertising = advertising;
3707
3708                         phy_start_aneg(phydev);
3709
3710                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3711                         if (phyid != PHY_ID_BCMAC131) {
3712                                 phyid &= PHY_BCM_OUI_MASK;
3713                                 if (phyid == PHY_BCM_OUI_1 ||
3714                                     phyid == PHY_BCM_OUI_2 ||
3715                                     phyid == PHY_BCM_OUI_3)
3716                                         do_low_power = true;
3717                         }
3718                 }
3719         } else {
3720                 do_low_power = true;
3721
3722                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3723                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3724
3725                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3726                         tg3_setup_phy(tp, 0);
3727         }
3728
3729         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3730                 u32 val;
3731
3732                 val = tr32(GRC_VCPU_EXT_CTRL);
3733                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3734         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3735                 int i;
3736                 u32 val;
3737
3738                 for (i = 0; i < 200; i++) {
3739                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3740                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3741                                 break;
3742                         msleep(1);
3743                 }
3744         }
3745         if (tg3_flag(tp, WOL_CAP))
3746                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3747                                                      WOL_DRV_STATE_SHUTDOWN |
3748                                                      WOL_DRV_WOL |
3749                                                      WOL_SET_MAGIC_PKT);
3750
3751         if (device_should_wake) {
3752                 u32 mac_mode;
3753
3754                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3755                         if (do_low_power &&
3756                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3757                                 tg3_phy_auxctl_write(tp,
3758                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3759                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3760                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3761                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3762                                 udelay(40);
3763                         }
3764
3765                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3766                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3767                         else
3768                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3769
3770                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3771                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3772                             ASIC_REV_5700) {
3773                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3774                                              SPEED_100 : SPEED_10;
3775                                 if (tg3_5700_link_polarity(tp, speed))
3776                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3777                                 else
3778                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3779                         }
3780                 } else {
3781                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3782                 }
3783
3784                 if (!tg3_flag(tp, 5750_PLUS))
3785                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3786
3787                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3788                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3789                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3790                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3791
3792                 if (tg3_flag(tp, ENABLE_APE))
3793                         mac_mode |= MAC_MODE_APE_TX_EN |
3794                                     MAC_MODE_APE_RX_EN |
3795                                     MAC_MODE_TDE_ENABLE;
3796
3797                 tw32_f(MAC_MODE, mac_mode);
3798                 udelay(100);
3799
3800                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3801                 udelay(10);
3802         }
3803
3804         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3805             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3806              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3807                 u32 base_val;
3808
3809                 base_val = tp->pci_clock_ctrl;
3810                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3811                              CLOCK_CTRL_TXCLK_DISABLE);
3812
3813                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3814                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3815         } else if (tg3_flag(tp, 5780_CLASS) ||
3816                    tg3_flag(tp, CPMU_PRESENT) ||
3817                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3818                 /* do nothing */
3819         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3820                 u32 newbits1, newbits2;
3821
3822                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3823                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3824                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3825                                     CLOCK_CTRL_TXCLK_DISABLE |
3826                                     CLOCK_CTRL_ALTCLK);
3827                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3828                 } else if (tg3_flag(tp, 5705_PLUS)) {
3829                         newbits1 = CLOCK_CTRL_625_CORE;
3830                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3831                 } else {
3832                         newbits1 = CLOCK_CTRL_ALTCLK;
3833                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3834                 }
3835
3836                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3837                             40);
3838
3839                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3840                             40);
3841
3842                 if (!tg3_flag(tp, 5705_PLUS)) {
3843                         u32 newbits3;
3844
3845                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3846                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3847                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3848                                             CLOCK_CTRL_TXCLK_DISABLE |
3849                                             CLOCK_CTRL_44MHZ_CORE);
3850                         } else {
3851                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3852                         }
3853
3854                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3855                                     tp->pci_clock_ctrl | newbits3, 40);
3856                 }
3857         }
3858
3859         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3860                 tg3_power_down_phy(tp, do_low_power);
3861
3862         tg3_frob_aux_power(tp, true);
3863
3864         /* Workaround for unstable PLL clock */
3865         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3866             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3867                 u32 val = tr32(0x7d00);
3868
3869                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3870                 tw32(0x7d00, val);
3871                 if (!tg3_flag(tp, ENABLE_ASF)) {
3872                         int err;
3873
3874                         err = tg3_nvram_lock(tp);
3875                         tg3_halt_cpu(tp, RX_CPU_BASE);
3876                         if (!err)
3877                                 tg3_nvram_unlock(tp);
3878                 }
3879         }
3880
3881         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3882
3883         return 0;
3884 }
3885
3886 static void tg3_power_down(struct tg3 *tp)
3887 {
3888         tg3_power_down_prepare(tp);
3889
3890         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3891         pci_set_power_state(tp->pdev, PCI_D3hot);
3892 }
3893
3894 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3895 {
3896         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3897         case MII_TG3_AUX_STAT_10HALF:
3898                 *speed = SPEED_10;
3899                 *duplex = DUPLEX_HALF;
3900                 break;
3901
3902         case MII_TG3_AUX_STAT_10FULL:
3903                 *speed = SPEED_10;
3904                 *duplex = DUPLEX_FULL;
3905                 break;
3906
3907         case MII_TG3_AUX_STAT_100HALF:
3908                 *speed = SPEED_100;
3909                 *duplex = DUPLEX_HALF;
3910                 break;
3911
3912         case MII_TG3_AUX_STAT_100FULL:
3913                 *speed = SPEED_100;
3914                 *duplex = DUPLEX_FULL;
3915                 break;
3916
3917         case MII_TG3_AUX_STAT_1000HALF:
3918                 *speed = SPEED_1000;
3919                 *duplex = DUPLEX_HALF;
3920                 break;
3921
3922         case MII_TG3_AUX_STAT_1000FULL:
3923                 *speed = SPEED_1000;
3924                 *duplex = DUPLEX_FULL;
3925                 break;
3926
3927         default:
3928                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3929                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3930                                  SPEED_10;
3931                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3932                                   DUPLEX_HALF;
3933                         break;
3934                 }
3935                 *speed = SPEED_UNKNOWN;
3936                 *duplex = DUPLEX_UNKNOWN;
3937                 break;
3938         }
3939 }
3940
3941 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3942 {
3943         int err = 0;
3944         u32 val, new_adv;
3945
3946         new_adv = ADVERTISE_CSMA;
3947         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3948         new_adv |= mii_advertise_flowctrl(flowctrl);
3949
3950         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3951         if (err)
3952                 goto done;
3953
3954         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3955                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3956
3957                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3958                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3959                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3960
3961                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3962                 if (err)
3963                         goto done;
3964         }
3965
3966         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3967                 goto done;
3968
3969         tw32(TG3_CPMU_EEE_MODE,
3970              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3971
3972         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3973         if (!err) {
3974                 u32 err2;
3975
3976                 val = 0;
3977                 /* Advertise 100BASE-TX EEE ability */
3978                 if (advertise & ADVERTISED_100baseT_Full)
3979                         val |= MDIO_AN_EEE_ADV_100TX;
3980                 /* Advertise 1000BASE-T EEE ability */
3981                 if (advertise & ADVERTISED_1000baseT_Full)
3982                         val |= MDIO_AN_EEE_ADV_1000T;
3983                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3984                 if (err)
3985                         val = 0;
3986
3987                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3988                 case ASIC_REV_5717:
3989                 case ASIC_REV_57765:
3990                 case ASIC_REV_57766:
3991                 case ASIC_REV_5719:
3992                         /* If we advertised any EEE abilities above... */
3993                         if (val)
3994                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3995                                       MII_TG3_DSP_TAP26_RMRXSTO |
3996                                       MII_TG3_DSP_TAP26_OPCSINPT;
3997                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3998                         /* Fall through */
3999                 case ASIC_REV_5720:
4000                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4001                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4002                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4003                 }
4004
4005                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
4006                 if (!err)
4007                         err = err2;
4008         }
4009
4010 done:
4011         return err;
4012 }
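     /* For reference, the flowctrl argument maps through
      * mii_advertise_flowctrl() as: FLOW_CTRL_RX | FLOW_CTRL_TX ->
      * ADVERTISE_PAUSE_CAP; FLOW_CTRL_RX alone -> ADVERTISE_PAUSE_CAP |
      * ADVERTISE_PAUSE_ASYM; FLOW_CTRL_TX alone -> ADVERTISE_PAUSE_ASYM.
      */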
4013
4014 static void tg3_phy_copper_begin(struct tg3 *tp)
4015 {
4016         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4017             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4018                 u32 adv, fc;
4019
4020                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4021                         adv = ADVERTISED_10baseT_Half |
4022                               ADVERTISED_10baseT_Full;
4023                         if (tg3_flag(tp, WOL_SPEED_100MB))
4024                                 adv |= ADVERTISED_100baseT_Half |
4025                                        ADVERTISED_100baseT_Full;
4026
4027                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4028                 } else {
4029                         adv = tp->link_config.advertising;
4030                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4031                                 adv &= ~(ADVERTISED_1000baseT_Half |
4032                                          ADVERTISED_1000baseT_Full);
4033
4034                         fc = tp->link_config.flowctrl;
4035                 }
4036
4037                 tg3_phy_autoneg_cfg(tp, adv, fc);
4038
4039                 tg3_writephy(tp, MII_BMCR,
4040                              BMCR_ANENABLE | BMCR_ANRESTART);
4041         } else {
4042                 int i;
4043                 u32 bmcr, orig_bmcr;
4044
4045                 tp->link_config.active_speed = tp->link_config.speed;
4046                 tp->link_config.active_duplex = tp->link_config.duplex;
4047
4048                 bmcr = 0;
4049                 switch (tp->link_config.speed) {
4050                 default:
4051                 case SPEED_10:
4052                         break;
4053
4054                 case SPEED_100:
4055                         bmcr |= BMCR_SPEED100;
4056                         break;
4057
4058                 case SPEED_1000:
4059                         bmcr |= BMCR_SPEED1000;
4060                         break;
4061                 }
4062
4063                 if (tp->link_config.duplex == DUPLEX_FULL)
4064                         bmcr |= BMCR_FULLDPLX;
4065
4066                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4067                     (bmcr != orig_bmcr)) {
4068                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4069                         for (i = 0; i < 1500; i++) {
4070                                 u32 tmp;
4071
4072                                 udelay(10);
4073                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4074                                     tg3_readphy(tp, MII_BMSR, &tmp))
4075                                         continue;
4076                                 if (!(tmp & BMSR_LSTATUS)) {
4077                                         udelay(40);
4078                                         break;
4079                                 }
4080                         }
4081                         tg3_writephy(tp, MII_BMCR, bmcr);
4082                         udelay(40);
4083                 }
4084         }
4085 }
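     /* Example: forcing 100 Mb/s full duplex in the non-autoneg branch
      * above yields bmcr == BMCR_SPEED100 | BMCR_FULLDPLX, written only
      * after the link has been taken down via the loopback write.
      */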
4086
4087 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4088 {
4089         int err;
4090
4091         /* Turn off tap power management. */
4092         /* Set Extended packet length bit */
4093         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4094
4095         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4096         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4097         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4098         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4099         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4100
4101         udelay(40);
4102
4103         return err;
4104 }
4105
4106 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4107 {
4108         u32 advmsk, tgtadv, advertising;
4109
4110         advertising = tp->link_config.advertising;
4111         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4112
4113         advmsk = ADVERTISE_ALL;
4114         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4115                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4116                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4117         }
4118
4119         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4120                 return false;
4121
4122         if ((*lcladv & advmsk) != tgtadv)
4123                 return false;
4124
4125         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4126                 u32 tg3_ctrl;
4127
4128                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4129
4130                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4131                         return false;
4132
4133                 if (tgtadv &&
4134                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4135                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4136                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4137                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4138                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4139                 } else {
4140                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4141                 }
4142
4143                 if (tg3_ctrl != tgtadv)
4144                         return false;
4145         }
4146
4147         return true;
4148 }
4149
4150 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4151 {
4152         u32 lpeth = 0;
4153
4154         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4155                 u32 val;
4156
4157                 if (tg3_readphy(tp, MII_STAT1000, &val))
4158                         return false;
4159
4160                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4161         }
4162
4163         if (tg3_readphy(tp, MII_LPA, rmtadv))
4164                 return false;
4165
4166         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4167         tp->link_config.rmt_adv = lpeth;
4168
4169         return true;
4170 }
4171
4172 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4173 {
4174         int current_link_up;
4175         u32 bmsr, val;
4176         u32 lcl_adv, rmt_adv;
4177         u16 current_speed;
4178         u8 current_duplex;
4179         int i, err;
4180
4181         tw32(MAC_EVENT, 0);
4182
4183         tw32_f(MAC_STATUS,
4184              (MAC_STATUS_SYNC_CHANGED |
4185               MAC_STATUS_CFG_CHANGED |
4186               MAC_STATUS_MI_COMPLETION |
4187               MAC_STATUS_LNKSTATE_CHANGED));
4188         udelay(40);
4189
4190         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4191                 tw32_f(MAC_MI_MODE,
4192                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4193                 udelay(80);
4194         }
4195
4196         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4197
4198         /* Some third-party PHYs need to be reset on link going
4199          * down.
4200          */
4201         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4202              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4203              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4204             netif_carrier_ok(tp->dev)) {
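                     /* BMSR latches link-down events; read it twice so the
                      * second read returns the current link state.
                      */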
4205                 tg3_readphy(tp, MII_BMSR, &bmsr);
4206                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4207                     !(bmsr & BMSR_LSTATUS))
4208                         force_reset = 1;
4209         }
4210         if (force_reset)
4211                 tg3_phy_reset(tp);
4212
4213         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4214                 tg3_readphy(tp, MII_BMSR, &bmsr);
4215                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4216                     !tg3_flag(tp, INIT_COMPLETE))
4217                         bmsr = 0;
4218
4219                 if (!(bmsr & BMSR_LSTATUS)) {
4220                         err = tg3_init_5401phy_dsp(tp);
4221                         if (err)
4222                                 return err;
4223
4224                         tg3_readphy(tp, MII_BMSR, &bmsr);
4225                         for (i = 0; i < 1000; i++) {
4226                                 udelay(10);
4227                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4228                                     (bmsr & BMSR_LSTATUS)) {
4229                                         udelay(40);
4230                                         break;
4231                                 }
4232                         }
4233
4234                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4235                             TG3_PHY_REV_BCM5401_B0 &&
4236                             !(bmsr & BMSR_LSTATUS) &&
4237                             tp->link_config.active_speed == SPEED_1000) {
4238                                 err = tg3_phy_reset(tp);
4239                                 if (!err)
4240                                         err = tg3_init_5401phy_dsp(tp);
4241                                 if (err)
4242                                         return err;
4243                         }
4244                 }
4245         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4246                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4247                 /* 5701 {A0,B0} CRC bug workaround */
4248                 tg3_writephy(tp, 0x15, 0x0a75);
4249                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4250                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4251                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4252         }
4253
4254         /* Clear pending interrupts... */
4255         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4256         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4257
4258         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4259                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4260         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4261                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4262
4263         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4264             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4265                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4266                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4267                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4268                 else
4269                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4270         }
4271
4272         current_link_up = 0;
4273         current_speed = SPEED_UNKNOWN;
4274         current_duplex = DUPLEX_UNKNOWN;
4275         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4276         tp->link_config.rmt_adv = 0;
4277
4278         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4279                 err = tg3_phy_auxctl_read(tp,
4280                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4281                                           &val);
4282                 if (!err && !(val & (1 << 10))) {
4283                         tg3_phy_auxctl_write(tp,
4284                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4285                                              val | (1 << 10));
4286                         goto relink;
4287                 }
4288         }
4289
4290         bmsr = 0;
4291         for (i = 0; i < 100; i++) {
4292                 tg3_readphy(tp, MII_BMSR, &bmsr);
4293                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4294                     (bmsr & BMSR_LSTATUS))
4295                         break;
4296                 udelay(40);
4297         }
4298
4299         if (bmsr & BMSR_LSTATUS) {
4300                 u32 aux_stat, bmcr;
4301
4302                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4303                 for (i = 0; i < 2000; i++) {
4304                         udelay(10);
4305                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4306                             aux_stat)
4307                                 break;
4308                 }
4309
4310                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4311                                              &current_speed,
4312                                              &current_duplex);
4313
4314                 bmcr = 0;
4315                 for (i = 0; i < 200; i++) {
4316                         tg3_readphy(tp, MII_BMCR, &bmcr);
4317                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4318                                 continue;
4319                         if (bmcr && bmcr != 0x7fff)
4320                                 break;
4321                         udelay(10);
4322                 }
4323
4324                 lcl_adv = 0;
4325                 rmt_adv = 0;
4326
4327                 tp->link_config.active_speed = current_speed;
4328                 tp->link_config.active_duplex = current_duplex;
4329
4330                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4331                         if ((bmcr & BMCR_ANENABLE) &&
4332                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4333                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4334                                 current_link_up = 1;
4335                 } else {
4336                         if (!(bmcr & BMCR_ANENABLE) &&
4337                             tp->link_config.speed == current_speed &&
4338                             tp->link_config.duplex == current_duplex &&
4339                             tp->link_config.flowctrl ==
4340                             tp->link_config.active_flowctrl) {
4341                                 current_link_up = 1;
4342                         }
4343                 }
4344
4345                 if (current_link_up == 1 &&
4346                     tp->link_config.active_duplex == DUPLEX_FULL) {
4347                         u32 reg, bit;
4348
4349                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4350                                 reg = MII_TG3_FET_GEN_STAT;
4351                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4352                         } else {
4353                                 reg = MII_TG3_EXT_STAT;
4354                                 bit = MII_TG3_EXT_STAT_MDIX;
4355                         }
4356
4357                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4358                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4359
4360                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4361                 }
4362         }
4363
4364 relink:
4365         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4366                 tg3_phy_copper_begin(tp);
4367
4368                 tg3_readphy(tp, MII_BMSR, &bmsr);
4369                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4370                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4371                         current_link_up = 1;
4372         }
4373
4374         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4375         if (current_link_up == 1) {
4376                 if (tp->link_config.active_speed == SPEED_100 ||
4377                     tp->link_config.active_speed == SPEED_10)
4378                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4379                 else
4380                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4381         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4382                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4383         else
4384                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4385
4386         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4387         if (tp->link_config.active_duplex == DUPLEX_HALF)
4388                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4389
4390         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4391                 if (current_link_up == 1 &&
4392                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4393                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4394                 else
4395                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4396         }
4397
4398         /* ??? Without this setting Netgear GA302T PHY does not
4399          * ??? send/receive packets...
4400          */
4401         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4402             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4403                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4404                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4405                 udelay(80);
4406         }
4407
4408         tw32_f(MAC_MODE, tp->mac_mode);
4409         udelay(40);
4410
4411         tg3_phy_eee_adjust(tp, current_link_up);
4412
4413         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4414                 /* Polled via timer. */
4415                 tw32_f(MAC_EVENT, 0);
4416         } else {
4417                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4418         }
4419         udelay(40);
4420
4421         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4422             current_link_up == 1 &&
4423             tp->link_config.active_speed == SPEED_1000 &&
4424             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4425                 udelay(120);
4426                 tw32_f(MAC_STATUS,
4427                      (MAC_STATUS_SYNC_CHANGED |
4428                       MAC_STATUS_CFG_CHANGED));
4429                 udelay(40);
4430                 tg3_write_mem(tp,
4431                               NIC_SRAM_FIRMWARE_MBOX,
4432                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4433         }
4434
4435         /* Prevent send BD corruption: disable CLKREQ at 10/100 speeds. */
4436         if (tg3_flag(tp, CLKREQ_BUG)) {
4437                 u16 oldlnkctl, newlnkctl;
4438
4439                 pci_read_config_word(tp->pdev,
4440                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4441                                      &oldlnkctl);
4442                 if (tp->link_config.active_speed == SPEED_100 ||
4443                     tp->link_config.active_speed == SPEED_10)
4444                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4445                 else
4446                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4447                 if (newlnkctl != oldlnkctl)
4448                         pci_write_config_word(tp->pdev,
4449                                               pci_pcie_cap(tp->pdev) +
4450                                               PCI_EXP_LNKCTL, newlnkctl);
4451         }
4452
4453         if (current_link_up != netif_carrier_ok(tp->dev)) {
4454                 if (current_link_up)
4455                         netif_carrier_on(tp->dev);
4456                 else
4457                         netif_carrier_off(tp->dev);
4458                 tg3_link_report(tp);
4459         }
4460
4461         return 0;
4462 }
4463
4464 struct tg3_fiber_aneginfo {
4465         int state;
4466 #define ANEG_STATE_UNKNOWN              0
4467 #define ANEG_STATE_AN_ENABLE            1
4468 #define ANEG_STATE_RESTART_INIT         2
4469 #define ANEG_STATE_RESTART              3
4470 #define ANEG_STATE_DISABLE_LINK_OK      4
4471 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4472 #define ANEG_STATE_ABILITY_DETECT       6
4473 #define ANEG_STATE_ACK_DETECT_INIT      7
4474 #define ANEG_STATE_ACK_DETECT           8
4475 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4476 #define ANEG_STATE_COMPLETE_ACK         10
4477 #define ANEG_STATE_IDLE_DETECT_INIT     11
4478 #define ANEG_STATE_IDLE_DETECT          12
4479 #define ANEG_STATE_LINK_OK              13
4480 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4481 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4482
4483         u32 flags;
4484 #define MR_AN_ENABLE            0x00000001
4485 #define MR_RESTART_AN           0x00000002
4486 #define MR_AN_COMPLETE          0x00000004
4487 #define MR_PAGE_RX              0x00000008
4488 #define MR_NP_LOADED            0x00000010
4489 #define MR_TOGGLE_TX            0x00000020
4490 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4491 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4492 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4493 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4494 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4495 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4496 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4497 #define MR_TOGGLE_RX            0x00002000
4498 #define MR_NP_RX                0x00004000
4499
4500 #define MR_LINK_OK              0x80000000
4501
4502         unsigned long link_time, cur_time;
4503
4504         u32 ability_match_cfg;
4505         int ability_match_count;
4506
4507         char ability_match, idle_match, ack_match;
4508
4509         u32 txconfig, rxconfig;
4510 #define ANEG_CFG_NP             0x00000080
4511 #define ANEG_CFG_ACK            0x00000040
4512 #define ANEG_CFG_RF2            0x00000020
4513 #define ANEG_CFG_RF1            0x00000010
4514 #define ANEG_CFG_PS2            0x00000001
4515 #define ANEG_CFG_PS1            0x00008000
4516 #define ANEG_CFG_HD             0x00004000
4517 #define ANEG_CFG_FD             0x00002000
4518 #define ANEG_CFG_INVAL          0x00001f06
4519
4520 };
4521 #define ANEG_OK         0
4522 #define ANEG_DONE       1
4523 #define ANEG_TIMER_ENAB 2
4524 #define ANEG_FAILED     -1
4525
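/* Settle time, in state-machine ticks; fiber_autoneg() below advances
 * one tick per ~1 us poll, so this is on the order of 10 ms.
 */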
4526 #define ANEG_STATE_SETTLE_TIME  10000
4527
4528 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4529                                    struct tg3_fiber_aneginfo *ap)
4530 {
4531         u16 flowctrl;
4532         unsigned long delta;
4533         u32 rx_cfg_reg;
4534         int ret;
4535
4536         if (ap->state == ANEG_STATE_UNKNOWN) {
4537                 ap->rxconfig = 0;
4538                 ap->link_time = 0;
4539                 ap->cur_time = 0;
4540                 ap->ability_match_cfg = 0;
4541                 ap->ability_match_count = 0;
4542                 ap->ability_match = 0;
4543                 ap->idle_match = 0;
4544                 ap->ack_match = 0;
4545         }
4546         ap->cur_time++;
4547
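        /* Sample the received config word.  "Ability match" is declared
         * only once the same word has been seen on consecutive polls.
         */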
4548         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4549                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4550
4551                 if (rx_cfg_reg != ap->ability_match_cfg) {
4552                         ap->ability_match_cfg = rx_cfg_reg;
4553                         ap->ability_match = 0;
4554                         ap->ability_match_count = 0;
4555                 } else {
4556                         if (++ap->ability_match_count > 1) {
4557                                 ap->ability_match = 1;
4558                                 ap->ability_match_cfg = rx_cfg_reg;
4559                         }
4560                 }
4561                 if (rx_cfg_reg & ANEG_CFG_ACK)
4562                         ap->ack_match = 1;
4563                 else
4564                         ap->ack_match = 0;
4565
4566                 ap->idle_match = 0;
4567         } else {
4568                 ap->idle_match = 1;
4569                 ap->ability_match_cfg = 0;
4570                 ap->ability_match_count = 0;
4571                 ap->ability_match = 0;
4572                 ap->ack_match = 0;
4573
4574                 rx_cfg_reg = 0;
4575         }
4576
4577         ap->rxconfig = rx_cfg_reg;
4578         ret = ANEG_OK;
4579
4580         switch (ap->state) {
4581         case ANEG_STATE_UNKNOWN:
4582                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4583                         ap->state = ANEG_STATE_AN_ENABLE;
4584
4585                 /* fallthru */
4586         case ANEG_STATE_AN_ENABLE:
4587                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4588                 if (ap->flags & MR_AN_ENABLE) {
4589                         ap->link_time = 0;
4590                         ap->cur_time = 0;
4591                         ap->ability_match_cfg = 0;
4592                         ap->ability_match_count = 0;
4593                         ap->ability_match = 0;
4594                         ap->idle_match = 0;
4595                         ap->ack_match = 0;
4596
4597                         ap->state = ANEG_STATE_RESTART_INIT;
4598                 } else {
4599                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4600                 }
4601                 break;
4602
4603         case ANEG_STATE_RESTART_INIT:
4604                 ap->link_time = ap->cur_time;
4605                 ap->flags &= ~(MR_NP_LOADED);
4606                 ap->txconfig = 0;
4607                 tw32(MAC_TX_AUTO_NEG, 0);
4608                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4609                 tw32_f(MAC_MODE, tp->mac_mode);
4610                 udelay(40);
4611
4612                 ret = ANEG_TIMER_ENAB;
4613                 ap->state = ANEG_STATE_RESTART;
4614
4615                 /* fallthru */
4616         case ANEG_STATE_RESTART:
4617                 delta = ap->cur_time - ap->link_time;
4618                 if (delta > ANEG_STATE_SETTLE_TIME)
4619                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4620                 else
4621                         ret = ANEG_TIMER_ENAB;
4622                 break;
4623
4624         case ANEG_STATE_DISABLE_LINK_OK:
4625                 ret = ANEG_DONE;
4626                 break;
4627
4628         case ANEG_STATE_ABILITY_DETECT_INIT:
4629                 ap->flags &= ~(MR_TOGGLE_TX);
4630                 ap->txconfig = ANEG_CFG_FD;
4631                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4632                 if (flowctrl & ADVERTISE_1000XPAUSE)
4633                         ap->txconfig |= ANEG_CFG_PS1;
4634                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4635                         ap->txconfig |= ANEG_CFG_PS2;
4636                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4637                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4638                 tw32_f(MAC_MODE, tp->mac_mode);
4639                 udelay(40);
4640
4641                 ap->state = ANEG_STATE_ABILITY_DETECT;
4642                 break;
4643
4644         case ANEG_STATE_ABILITY_DETECT:
4645                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4646                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4647                 break;
4648
4649         case ANEG_STATE_ACK_DETECT_INIT:
4650                 ap->txconfig |= ANEG_CFG_ACK;
4651                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4652                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4653                 tw32_f(MAC_MODE, tp->mac_mode);
4654                 udelay(40);
4655
4656                 ap->state = ANEG_STATE_ACK_DETECT;
4657
4658                 /* fallthru */
4659         case ANEG_STATE_ACK_DETECT:
4660                 if (ap->ack_match != 0) {
4661                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4662                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4663                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4664                         } else {
4665                                 ap->state = ANEG_STATE_AN_ENABLE;
4666                         }
4667                 } else if (ap->ability_match != 0 &&
4668                            ap->rxconfig == 0) {
4669                         ap->state = ANEG_STATE_AN_ENABLE;
4670                 }
4671                 break;
4672
4673         case ANEG_STATE_COMPLETE_ACK_INIT:
4674                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4675                         ret = ANEG_FAILED;
4676                         break;
4677                 }
4678                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4679                                MR_LP_ADV_HALF_DUPLEX |
4680                                MR_LP_ADV_SYM_PAUSE |
4681                                MR_LP_ADV_ASYM_PAUSE |
4682                                MR_LP_ADV_REMOTE_FAULT1 |
4683                                MR_LP_ADV_REMOTE_FAULT2 |
4684                                MR_LP_ADV_NEXT_PAGE |
4685                                MR_TOGGLE_RX |
4686                                MR_NP_RX);
4687                 if (ap->rxconfig & ANEG_CFG_FD)
4688                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4689                 if (ap->rxconfig & ANEG_CFG_HD)
4690                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4691                 if (ap->rxconfig & ANEG_CFG_PS1)
4692                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4693                 if (ap->rxconfig & ANEG_CFG_PS2)
4694                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4695                 if (ap->rxconfig & ANEG_CFG_RF1)
4696                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4697                 if (ap->rxconfig & ANEG_CFG_RF2)
4698                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4699                 if (ap->rxconfig & ANEG_CFG_NP)
4700                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4701
4702                 ap->link_time = ap->cur_time;
4703
4704                 ap->flags ^= (MR_TOGGLE_TX);
4705                 if (ap->rxconfig & 0x0008)
4706                         ap->flags |= MR_TOGGLE_RX;
4707                 if (ap->rxconfig & ANEG_CFG_NP)
4708                         ap->flags |= MR_NP_RX;
4709                 ap->flags |= MR_PAGE_RX;
4710
4711                 ap->state = ANEG_STATE_COMPLETE_ACK;
4712                 ret = ANEG_TIMER_ENAB;
4713                 break;
4714
4715         case ANEG_STATE_COMPLETE_ACK:
4716                 if (ap->ability_match != 0 &&
4717                     ap->rxconfig == 0) {
4718                         ap->state = ANEG_STATE_AN_ENABLE;
4719                         break;
4720                 }
4721                 delta = ap->cur_time - ap->link_time;
4722                 if (delta > ANEG_STATE_SETTLE_TIME) {
4723                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4724                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4725                         } else {
4726                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4727                                     !(ap->flags & MR_NP_RX)) {
4728                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4729                                 } else {
4730                                         ret = ANEG_FAILED;
4731                                 }
4732                         }
4733                 }
4734                 break;
4735
4736         case ANEG_STATE_IDLE_DETECT_INIT:
4737                 ap->link_time = ap->cur_time;
4738                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4739                 tw32_f(MAC_MODE, tp->mac_mode);
4740                 udelay(40);
4741
4742                 ap->state = ANEG_STATE_IDLE_DETECT;
4743                 ret = ANEG_TIMER_ENAB;
4744                 break;
4745
4746         case ANEG_STATE_IDLE_DETECT:
4747                 if (ap->ability_match != 0 &&
4748                     ap->rxconfig == 0) {
4749                         ap->state = ANEG_STATE_AN_ENABLE;
4750                         break;
4751                 }
4752                 delta = ap->cur_time - ap->link_time;
4753                 if (delta > ANEG_STATE_SETTLE_TIME) {
4754                         /* XXX another gem from the Broadcom driver :( */
4755                         ap->state = ANEG_STATE_LINK_OK;
4756                 }
4757                 break;
4758
4759         case ANEG_STATE_LINK_OK:
4760                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4761                 ret = ANEG_DONE;
4762                 break;
4763
4764         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4765                 /* ??? unimplemented */
4766                 break;
4767
4768         case ANEG_STATE_NEXT_PAGE_WAIT:
4769                 /* ??? unimplemented */
4770                 break;
4771
4772         default:
4773                 ret = ANEG_FAILED;
4774                 break;
4775         }
4776
4777         return ret;
4778 }
4779
4780 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4781 {
4782         int res = 0;
4783         struct tg3_fiber_aneginfo aninfo;
4784         int status = ANEG_FAILED;
4785         unsigned int tick;
4786         u32 tmp;
4787
4788         tw32_f(MAC_TX_AUTO_NEG, 0);
4789
4790         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4791         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4792         udelay(40);
4793
4794         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4795         udelay(40);
4796
4797         memset(&aninfo, 0, sizeof(aninfo));
4798         aninfo.flags |= MR_AN_ENABLE;
4799         aninfo.state = ANEG_STATE_UNKNOWN;
4800         aninfo.cur_time = 0;
4801         tick = 0;
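        /* Crank the software autoneg state machine, one tick per
         * microsecond, for at most ~195 ms.
         */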
4802         while (++tick < 195000) {
4803                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4804                 if (status == ANEG_DONE || status == ANEG_FAILED)
4805                         break;
4806
4807                 udelay(1);
4808         }
4809
4810         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4811         tw32_f(MAC_MODE, tp->mac_mode);
4812         udelay(40);
4813
4814         *txflags = aninfo.txconfig;
4815         *rxflags = aninfo.flags;
4816
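        /* Success requires ANEG_DONE plus *any* one of the three flags
         * below, not all of them.
         */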
4817         if (status == ANEG_DONE &&
4818             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4819                              MR_LP_ADV_FULL_DUPLEX)))
4820                 res = 1;
4821
4822         return res;
4823 }
4824
4825 static void tg3_init_bcm8002(struct tg3 *tp)
4826 {
4827         u32 mac_status = tr32(MAC_STATUS);
4828         int i;
4829
4830         /* Reset when initializing the first time or when we have a link. */
4831         if (tg3_flag(tp, INIT_COMPLETE) &&
4832             !(mac_status & MAC_STATUS_PCS_SYNCED))
4833                 return;
4834
4835         /* Set PLL lock range. */
4836         tg3_writephy(tp, 0x16, 0x8007);
4837
4838         /* SW reset */
4839         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4840
4841         /* Wait for reset to complete. */
4842         /* XXX schedule_timeout() ... */
4843         for (i = 0; i < 500; i++)
4844                 udelay(10);
4845
4846         /* Config mode; select PMA/Ch 1 regs. */
4847         tg3_writephy(tp, 0x10, 0x8411);
4848
4849         /* Enable auto-lock and comdet, select txclk for tx. */
4850         tg3_writephy(tp, 0x11, 0x0a10);
4851
4852         tg3_writephy(tp, 0x18, 0x00a0);
4853         tg3_writephy(tp, 0x16, 0x41ff);
4854
4855         /* Assert and deassert POR. */
4856         tg3_writephy(tp, 0x13, 0x0400);
4857         udelay(40);
4858         tg3_writephy(tp, 0x13, 0x0000);
4859
4860         tg3_writephy(tp, 0x11, 0x0a50);
4861         udelay(40);
4862         tg3_writephy(tp, 0x11, 0x0a10);
4863
4864         /* Wait for signal to stabilize */
4865         /* XXX schedule_timeout() ... */
4866         for (i = 0; i < 15000; i++)
4867                 udelay(10);
4868
4869         /* Deselect the channel register so we can read the PHYID
4870          * later.
4871          */
4872         tg3_writephy(tp, 0x10, 0x8011);
4873 }
4874
4875 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4876 {
4877         u16 flowctrl;
4878         u32 sg_dig_ctrl, sg_dig_status;
4879         u32 serdes_cfg, expected_sg_dig_ctrl;
4880         int workaround, port_a;
4881         int current_link_up;
4882
4883         serdes_cfg = 0;
4884         expected_sg_dig_ctrl = 0;
4885         workaround = 0;
4886         port_a = 1;
4887         current_link_up = 0;
4888
4889         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4890             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4891                 workaround = 1;
4892                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4893                         port_a = 0;
4894
4895                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4896                 /* preserve bits 20-23 for voltage regulator */
4897                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4898         }
4899
4900         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4901
4902         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4903                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4904                         if (workaround) {
4905                                 u32 val = serdes_cfg;
4906
4907                                 if (port_a)
4908                                         val |= 0xc010000;
4909                                 else
4910                                         val |= 0x4010000;
4911                                 tw32_f(MAC_SERDES_CFG, val);
4912                         }
4913
4914                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4915                 }
4916                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4917                         tg3_setup_flow_control(tp, 0, 0);
4918                         current_link_up = 1;
4919                 }
4920                 goto out;
4921         }
4922
4923         /* Want auto-negotiation.  */
4924         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4925
4926         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4927         if (flowctrl & ADVERTISE_1000XPAUSE)
4928                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4929         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4930                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4931
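        /* SG_DIG control is not set up the way autoneg needs.  If a
         * parallel-detect link is still holding (PCS synced, no config
         * words) and its timeout has not expired, keep the link up;
         * otherwise restart hardware autoneg.
         */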
4932         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4933                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4934                     tp->serdes_counter &&
4935                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4936                                     MAC_STATUS_RCVD_CFG)) ==
4937                      MAC_STATUS_PCS_SYNCED)) {
4938                         tp->serdes_counter--;
4939                         current_link_up = 1;
4940                         goto out;
4941                 }
4942 restart_autoneg:
4943                 if (workaround)
4944                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4945                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4946                 udelay(5);
4947                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4948
4949                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4950                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4951         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4952                                  MAC_STATUS_SIGNAL_DET)) {
4953                 sg_dig_status = tr32(SG_DIG_STATUS);
4954                 mac_status = tr32(MAC_STATUS);
4955
4956                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4957                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4958                         u32 local_adv = 0, remote_adv = 0;
4959
4960                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4961                                 local_adv |= ADVERTISE_1000XPAUSE;
4962                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4963                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4964
4965                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4966                                 remote_adv |= LPA_1000XPAUSE;
4967                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4968                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4969
4970                         tp->link_config.rmt_adv =
4971                                            mii_adv_to_ethtool_adv_x(remote_adv);
4972
4973                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4974                         current_link_up = 1;
4975                         tp->serdes_counter = 0;
4976                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4977                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4978                         if (tp->serdes_counter)
4979                                 tp->serdes_counter--;
4980                         else {
4981                                 if (workaround) {
4982                                         u32 val = serdes_cfg;
4983
4984                                         if (port_a)
4985                                                 val |= 0xc010000;
4986                                         else
4987                                                 val |= 0x4010000;
4988
4989                                         tw32_f(MAC_SERDES_CFG, val);
4990                                 }
4991
4992                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4993                                 udelay(40);
4994
4995                                 /* Link parallel detection: link is up
4996                                  * only if we have PCS_SYNC and are not
4997                                  * receiving config code words. */
4998                                 mac_status = tr32(MAC_STATUS);
4999                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5000                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5001                                         tg3_setup_flow_control(tp, 0, 0);
5002                                         current_link_up = 1;
5003                                         tp->phy_flags |=
5004                                                 TG3_PHYFLG_PARALLEL_DETECT;
5005                                         tp->serdes_counter =
5006                                                 SERDES_PARALLEL_DET_TIMEOUT;
5007                                 } else
5008                                         goto restart_autoneg;
5009                         }
5010                 }
5011         } else {
5012                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5013                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5014         }
5015
5016 out:
5017         return current_link_up;
5018 }
5019
5020 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5021 {
5022         int current_link_up = 0;
5023
5024         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5025                 goto out;
5026
5027         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5028                 u32 txflags, rxflags;
5029                 int i;
5030
5031                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5032                         u32 local_adv = 0, remote_adv = 0;
5033
5034                         if (txflags & ANEG_CFG_PS1)
5035                                 local_adv |= ADVERTISE_1000XPAUSE;
5036                         if (txflags & ANEG_CFG_PS2)
5037                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5038
5039                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5040                                 remote_adv |= LPA_1000XPAUSE;
5041                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5042                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5043
5044                         tp->link_config.rmt_adv =
5045                                            mii_adv_to_ethtool_adv_x(remote_adv);
5046
5047                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5048
5049                         current_link_up = 1;
5050                 }
5051                 for (i = 0; i < 30; i++) {
5052                         udelay(20);
5053                         tw32_f(MAC_STATUS,
5054                                (MAC_STATUS_SYNC_CHANGED |
5055                                 MAC_STATUS_CFG_CHANGED));
5056                         udelay(40);
5057                         if ((tr32(MAC_STATUS) &
5058                              (MAC_STATUS_SYNC_CHANGED |
5059                               MAC_STATUS_CFG_CHANGED)) == 0)
5060                                 break;
5061                 }
5062
5063                 mac_status = tr32(MAC_STATUS);
5064                 if (current_link_up == 0 &&
5065                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5066                     !(mac_status & MAC_STATUS_RCVD_CFG))
5067                         current_link_up = 1;
5068         } else {
5069                 tg3_setup_flow_control(tp, 0, 0);
5070
5071                 /* Force the link up at 1000 Mb/s, full duplex. */
5072                 current_link_up = 1;
5073
5074                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5075                 udelay(40);
5076
5077                 tw32_f(MAC_MODE, tp->mac_mode);
5078                 udelay(40);
5079         }
5080
5081 out:
5082         return current_link_up;
5083 }
5084
5085 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5086 {
5087         u32 orig_pause_cfg;
5088         u16 orig_active_speed;
5089         u8 orig_active_duplex;
5090         u32 mac_status;
5091         int current_link_up;
5092         int i;
5093
5094         orig_pause_cfg = tp->link_config.active_flowctrl;
5095         orig_active_speed = tp->link_config.active_speed;
5096         orig_active_duplex = tp->link_config.active_duplex;
5097
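        /* Fast path: if the link is already up, PCS is synced with
         * signal detect, and no config words or config changes are
         * pending, just ack the sync/config-changed latches and keep
         * the current link state.
         */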
5098         if (!tg3_flag(tp, HW_AUTONEG) &&
5099             netif_carrier_ok(tp->dev) &&
5100             tg3_flag(tp, INIT_COMPLETE)) {
5101                 mac_status = tr32(MAC_STATUS);
5102                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5103                                MAC_STATUS_SIGNAL_DET |
5104                                MAC_STATUS_CFG_CHANGED |
5105                                MAC_STATUS_RCVD_CFG);
5106                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5107                                    MAC_STATUS_SIGNAL_DET)) {
5108                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5109                                             MAC_STATUS_CFG_CHANGED));
5110                         return 0;
5111                 }
5112         }
5113
5114         tw32_f(MAC_TX_AUTO_NEG, 0);
5115
5116         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5117         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5118         tw32_f(MAC_MODE, tp->mac_mode);
5119         udelay(40);
5120
5121         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5122                 tg3_init_bcm8002(tp);
5123
5124         /* Enable link change event even when serdes polling.  */
5125         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5126         udelay(40);
5127
5128         current_link_up = 0;
5129         tp->link_config.rmt_adv = 0;
5130         mac_status = tr32(MAC_STATUS);
5131
5132         if (tg3_flag(tp, HW_AUTONEG))
5133                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5134         else
5135                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5136
5137         tp->napi[0].hw_status->status =
5138                 (SD_STATUS_UPDATED |
5139                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5140
5141         for (i = 0; i < 100; i++) {
5142                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5143                                     MAC_STATUS_CFG_CHANGED));
5144                 udelay(5);
5145                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5146                                          MAC_STATUS_CFG_CHANGED |
5147                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5148                         break;
5149         }
5150
5151         mac_status = tr32(MAC_STATUS);
5152         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5153                 current_link_up = 0;
5154                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5155                     tp->serdes_counter == 0) {
5156                         tw32_f(MAC_MODE, (tp->mac_mode |
5157                                           MAC_MODE_SEND_CONFIGS));
5158                         udelay(1);
5159                         tw32_f(MAC_MODE, tp->mac_mode);
5160                 }
5161         }
5162
5163         if (current_link_up == 1) {
5164                 tp->link_config.active_speed = SPEED_1000;
5165                 tp->link_config.active_duplex = DUPLEX_FULL;
5166                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5167                                     LED_CTRL_LNKLED_OVERRIDE |
5168                                     LED_CTRL_1000MBPS_ON));
5169         } else {
5170                 tp->link_config.active_speed = SPEED_UNKNOWN;
5171                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5172                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5173                                     LED_CTRL_LNKLED_OVERRIDE |
5174                                     LED_CTRL_TRAFFIC_OVERRIDE));
5175         }
5176
5177         if (current_link_up != netif_carrier_ok(tp->dev)) {
5178                 if (current_link_up)
5179                         netif_carrier_on(tp->dev);
5180                 else
5181                         netif_carrier_off(tp->dev);
5182                 tg3_link_report(tp);
5183         } else {
5184                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5185                 if (orig_pause_cfg != now_pause_cfg ||
5186                     orig_active_speed != tp->link_config.active_speed ||
5187                     orig_active_duplex != tp->link_config.active_duplex)
5188                         tg3_link_report(tp);
5189         }
5190
5191         return 0;
5192 }
5193
5194 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5195 {
5196         int current_link_up, err = 0;
5197         u32 bmsr, bmcr;
5198         u16 current_speed;
5199         u8 current_duplex;
5200         u32 local_adv, remote_adv;
5201
5202         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5203         tw32_f(MAC_MODE, tp->mac_mode);
5204         udelay(40);
5205
5206         tw32(MAC_EVENT, 0);
5207
5208         tw32_f(MAC_STATUS,
5209              (MAC_STATUS_SYNC_CHANGED |
5210               MAC_STATUS_CFG_CHANGED |
5211               MAC_STATUS_MI_COMPLETION |
5212               MAC_STATUS_LNKSTATE_CHANGED));
5213         udelay(40);
5214
5215         if (force_reset)
5216                 tg3_phy_reset(tp);
5217
5218         current_link_up = 0;
5219         current_speed = SPEED_UNKNOWN;
5220         current_duplex = DUPLEX_UNKNOWN;
5221         tp->link_config.rmt_adv = 0;
5222
5223         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5224         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
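        /* BMSR link status is latched, hence the double read above.  On
         * the 5714 the MAC TX status supplies the link indication
         * instead of the PHY's latched bit.
         */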
5225         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5226                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5227                         bmsr |= BMSR_LSTATUS;
5228                 else
5229                         bmsr &= ~BMSR_LSTATUS;
5230         }
5231
5232         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5233
5234         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5235             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5236                 /* do nothing, just check for link up at the end */
5237         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5238                 u32 adv, newadv;
5239
5240                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5241                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5242                                  ADVERTISE_1000XPAUSE |
5243                                  ADVERTISE_1000XPSE_ASYM |
5244                                  ADVERTISE_SLCT);
5245
5246                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5247                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5248
5249                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5250                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5251                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5252                         tg3_writephy(tp, MII_BMCR, bmcr);
5253
5254                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5255                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5256                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5257
5258                         return err;
5259                 }
5260         } else {
5261                 u32 new_bmcr;
5262
5263                 bmcr &= ~BMCR_SPEED1000;
5264                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5265
5266                 if (tp->link_config.duplex == DUPLEX_FULL)
5267                         new_bmcr |= BMCR_FULLDPLX;
5268
5269                 if (new_bmcr != bmcr) {
5270                         /* BMCR_SPEED1000 is a reserved bit that needs
5271                          * to be set on write.
5272                          */
5273                         new_bmcr |= BMCR_SPEED1000;
5274
5275                         /* Force a linkdown */
5276                         if (netif_carrier_ok(tp->dev)) {
5277                                 u32 adv;
5278
5279                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5280                                 adv &= ~(ADVERTISE_1000XFULL |
5281                                          ADVERTISE_1000XHALF |
5282                                          ADVERTISE_SLCT);
5283                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5284                                 tg3_writephy(tp, MII_BMCR, bmcr |
5285                                                            BMCR_ANRESTART |
5286                                                            BMCR_ANENABLE);
5287                                 udelay(10);
5288                                 netif_carrier_off(tp->dev);
5289                         }
5290                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5291                         bmcr = new_bmcr;
5292                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5293                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5294                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5295                             ASIC_REV_5714) {
5296                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5297                                         bmsr |= BMSR_LSTATUS;
5298                                 else
5299                                         bmsr &= ~BMSR_LSTATUS;
5300                         }
5301                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5302                 }
5303         }
5304
5305         if (bmsr & BMSR_LSTATUS) {
5306                 current_speed = SPEED_1000;
5307                 current_link_up = 1;
5308                 if (bmcr & BMCR_FULLDPLX)
5309                         current_duplex = DUPLEX_FULL;
5310                 else
5311                         current_duplex = DUPLEX_HALF;
5312
5313                 local_adv = 0;
5314                 remote_adv = 0;
5315
5316                 if (bmcr & BMCR_ANENABLE) {
5317                         u32 common;
5318
5319                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5320                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5321                         common = local_adv & remote_adv;
5322                         if (common & (ADVERTISE_1000XHALF |
5323                                       ADVERTISE_1000XFULL)) {
5324                                 if (common & ADVERTISE_1000XFULL)
5325                                         current_duplex = DUPLEX_FULL;
5326                                 else
5327                                         current_duplex = DUPLEX_HALF;
5328
5329                                 tp->link_config.rmt_adv =
5330                                            mii_adv_to_ethtool_adv_x(remote_adv);
5331                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5332                                 /* Link is up via parallel detect */
5333                         } else {
5334                                 current_link_up = 0;
5335                         }
5336                 }
5337         }
5338
5339         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5340                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5341
5342         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5343         if (tp->link_config.active_duplex == DUPLEX_HALF)
5344                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5345
5346         tw32_f(MAC_MODE, tp->mac_mode);
5347         udelay(40);
5348
5349         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5350
5351         tp->link_config.active_speed = current_speed;
5352         tp->link_config.active_duplex = current_duplex;
5353
5354         if (current_link_up != netif_carrier_ok(tp->dev)) {
5355                 if (current_link_up)
5356                         netif_carrier_on(tp->dev);
5357                 else {
5358                         netif_carrier_off(tp->dev);
5359                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5360                 }
5361                 tg3_link_report(tp);
5362         }
5363         return err;
5364 }
5365
5366 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5367 {
5368         if (tp->serdes_counter) {
5369                 /* Give autoneg time to complete. */
5370                 tp->serdes_counter--;
5371                 return;
5372         }
5373
5374         if (!netif_carrier_ok(tp->dev) &&
5375             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5376                 u32 bmcr;
5377
5378                 tg3_readphy(tp, MII_BMCR, &bmcr);
5379                 if (bmcr & BMCR_ANENABLE) {
5380                         u32 phy1, phy2;
5381
5382                         /* Select shadow register 0x1f */
5383                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5384                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5385
5386                         /* Select expansion interrupt status register */
5387                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5388                                          MII_TG3_DSP_EXP1_INT_STAT);
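                        /* Double read: the first read flushes the
                         * latched value (same idiom as the BMSR reads
                         * above).
                         */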
5389                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5390                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5391
5392                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5393                                 /* We have signal detect and not receiving
5394                                  * config code words, link is up by parallel
5395                                  * detection.
5396                                  */
5397
5398                                 bmcr &= ~BMCR_ANENABLE;
5399                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5400                                 tg3_writephy(tp, MII_BMCR, bmcr);
5401                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5402                         }
5403                 }
5404         } else if (netif_carrier_ok(tp->dev) &&
5405                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5406                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5407                 u32 phy2;
5408
5409                 /* Select expansion interrupt status register */
5410                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5411                                  MII_TG3_DSP_EXP1_INT_STAT);
5412                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5413                 if (phy2 & 0x20) {
5414                         u32 bmcr;
5415
5416                         /* Config code words received, turn on autoneg. */
5417                         tg3_readphy(tp, MII_BMCR, &bmcr);
5418                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5419
5420                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5421
5422                 }
5423         }
5424 }
5425
5426 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5427 {
5428         u32 val;
5429         int err;
5430
5431         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5432                 err = tg3_setup_fiber_phy(tp, force_reset);
5433         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5434                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5435         else
5436                 err = tg3_setup_copper_phy(tp, force_reset);
5437
5438         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5439                 u32 scale;
5440
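                /* Pick a GRC timer prescaler that matches the current
                 * MAC core clock.
                 */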
5441                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5442                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5443                         scale = 65;
5444                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5445                         scale = 6;
5446                 else
5447                         scale = 12;
5448
5449                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5450                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5451                 tw32(GRC_MISC_CFG, val);
5452         }
5453
5454         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5455               (6 << TX_LENGTHS_IPG_SHIFT);
5456         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5457                 val |= tr32(MAC_TX_LENGTHS) &
5458                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5459                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5460
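        /* Gigabit half duplex needs an extended slot time; every other
         * speed/duplex combination uses the standard value.
         */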
5461         if (tp->link_config.active_speed == SPEED_1000 &&
5462             tp->link_config.active_duplex == DUPLEX_HALF)
5463                 tw32(MAC_TX_LENGTHS, val |
5464                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5465         else
5466                 tw32(MAC_TX_LENGTHS, val |
5467                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5468
5469         if (!tg3_flag(tp, 5705_PLUS)) {
5470                 if (netif_carrier_ok(tp->dev)) {
5471                         tw32(HOSTCC_STAT_COAL_TICKS,
5472                              tp->coal.stats_block_coalesce_usecs);
5473                 } else {
5474                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5475                 }
5476         }
5477
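        /* ASPM workaround: use the configured L1-entry threshold while
         * the link is down, and the maximum threshold once it is up.
         */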
5478         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5479                 val = tr32(PCIE_PWR_MGMT_THRESH);
5480                 if (!netif_carrier_ok(tp->dev))
5481                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5482                               tp->pwrmgmt_thresh;
5483                 else
5484                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5485                 tw32(PCIE_PWR_MGMT_THRESH, val);
5486         }
5487
5488         return err;
5489 }
5490
5491 static inline int tg3_irq_sync(struct tg3 *tp)
5492 {
5493         return tp->irq_sync;
5494 }
5495
5496 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5497 {
5498         int i;
5499
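        /* Advance dst by the register offset so each value lands at the
         * same offset in the dump buffer as the register occupies in
         * BAR space.
         */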
5500         dst = (u32 *)((u8 *)dst + off);
5501         for (i = 0; i < len; i += sizeof(u32))
5502                 *dst++ = tr32(off + i);
5503 }
5504
5505 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5506 {
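        /* Snapshot the legacy register file block by block; each call
         * takes (starting register, block length in bytes).
         */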
5507         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5508         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5509         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5510         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5511         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5512         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5513         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5514         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5515         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5516         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5517         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5518         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5519         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5520         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5521         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5522         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5523         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5524         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5525         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5526
5527         if (tg3_flag(tp, SUPPORT_MSIX))
5528                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5529
5530         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5531         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5532         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5533         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5534         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5535         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5536         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5537         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5538
5539         if (!tg3_flag(tp, 5705_PLUS)) {
5540                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5541                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5542                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5543         }
5544
5545         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5546         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5547         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5548         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5549         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5550
5551         if (tg3_flag(tp, NVRAM))
5552                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5553 }
5554
5555 static void tg3_dump_state(struct tg3 *tp)
5556 {
5557         int i;
5558         u32 *regs;
5559
5560         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5561         if (!regs) {
5562                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5563                 return;
5564         }
5565
5566         if (tg3_flag(tp, PCI_EXPRESS)) {
5567                 /* Read up to but not including private PCI registers */
5568                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5569                         regs[i / sizeof(u32)] = tr32(i);
5570         } else
5571                 tg3_dump_legacy_regs(tp, regs);
5572
5573         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5574                 if (!regs[i + 0] && !regs[i + 1] &&
5575                     !regs[i + 2] && !regs[i + 3])
5576                         continue;
5577
5578                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5579                            i * 4,
5580                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5581         }
5582
5583         kfree(regs);
5584
5585         for (i = 0; i < tp->irq_cnt; i++) {
5586                 struct tg3_napi *tnapi = &tp->napi[i];
5587
5588                 /* SW status block */
5589                 netdev_err(tp->dev,
5590                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5591                            i,
5592                            tnapi->hw_status->status,
5593                            tnapi->hw_status->status_tag,
5594                            tnapi->hw_status->rx_jumbo_consumer,
5595                            tnapi->hw_status->rx_consumer,
5596                            tnapi->hw_status->rx_mini_consumer,
5597                            tnapi->hw_status->idx[0].rx_producer,
5598                            tnapi->hw_status->idx[0].tx_consumer);
5599
5600                 netdev_err(tp->dev,
5601                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5602                            i,
5603                            tnapi->last_tag, tnapi->last_irq_tag,
5604                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5605                            tnapi->rx_rcb_ptr,
5606                            tnapi->prodring.rx_std_prod_idx,
5607                            tnapi->prodring.rx_std_cons_idx,
5608                            tnapi->prodring.rx_jmb_prod_idx,
5609                            tnapi->prodring.rx_jmb_cons_idx);
5610         }
5611 }
5612
5613 /* This is called whenever we suspect that the system chipset is re-
5614  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5615  * is bogus tx completions. We try to recover by setting the
5616  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5617  * in the workqueue.
5618  */
5619 static void tg3_tx_recover(struct tg3 *tp)
5620 {
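        /* Recovery only makes sense if we were doing direct mailbox
         * writes without the reordering workaround already in place.
         */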
5621         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5622                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5623
5624         netdev_warn(tp->dev,
5625                     "The system may be re-ordering memory-mapped I/O "
5626                     "cycles to the network device, attempting to recover. "
5627                     "Please report the problem to the driver maintainer "
5628                     "and include system chipset information.\n");
5629
5630         spin_lock(&tp->lock);
5631         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5632         spin_unlock(&tp->lock);
5633 }
5634
5635 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5636 {
5637         /* Tell compiler to fetch tx indices from memory. */
5638         barrier();
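        /* tx_prod and tx_cons are free-running indices; masking their
         * difference with (ring size - 1) works because the ring size
         * is a power of two.
         */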
5639         return tnapi->tx_pending -
5640                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5641 }
5642
5643 /* Tigon3 never reports partial packet sends.  So we do not
5644  * need special logic to handle SKBs that have not had all
5645  * of their frags sent yet, like SunGEM does.
5646  */
5647 static void tg3_tx(struct tg3_napi *tnapi)
5648 {
5649         struct tg3 *tp = tnapi->tp;
5650         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5651         u32 sw_idx = tnapi->tx_cons;
5652         struct netdev_queue *txq;
5653         int index = tnapi - tp->napi;
5654         unsigned int pkts_compl = 0, bytes_compl = 0;
5655
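        /* With TSS the first interrupt vector carries no TX ring, so
         * TX queue N is serviced by napi[N + 1].
         */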
5656         if (tg3_flag(tp, ENABLE_TSS))
5657                 index--;
5658
5659         txq = netdev_get_tx_queue(tp->dev, index);
5660
5661         while (sw_idx != hw_idx) {
5662                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5663                 struct sk_buff *skb = ri->skb;
5664                 int i, tx_bug = 0;
5665
5666                 if (unlikely(skb == NULL)) {
5667                         tg3_tx_recover(tp);
5668                         return;
5669                 }
5670
5671                 pci_unmap_single(tp->pdev,
5672                                  dma_unmap_addr(ri, mapping),
5673                                  skb_headlen(skb),
5674                                  PCI_DMA_TODEVICE);
5675
5676                 ri->skb = NULL;
5677
5678                 while (ri->fragmented) {
5679                         ri->fragmented = false;
5680                         sw_idx = NEXT_TX(sw_idx);
5681                         ri = &tnapi->tx_buffers[sw_idx];
5682                 }
5683
5684                 sw_idx = NEXT_TX(sw_idx);
5685
5686                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5687                         ri = &tnapi->tx_buffers[sw_idx];
5688                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5689                                 tx_bug = 1;
5690
5691                         pci_unmap_page(tp->pdev,
5692                                        dma_unmap_addr(ri, mapping),
5693                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5694                                        PCI_DMA_TODEVICE);
5695
5696                         while (ri->fragmented) {
5697                                 ri->fragmented = false;
5698                                 sw_idx = NEXT_TX(sw_idx);
5699                                 ri = &tnapi->tx_buffers[sw_idx];
5700                         }
5701
5702                         sw_idx = NEXT_TX(sw_idx);
5703                 }
5704
5705                 pkts_compl++;
5706                 bytes_compl += skb->len;
5707
5708                 dev_kfree_skb(skb);
5709
5710                 if (unlikely(tx_bug)) {
5711                         tg3_tx_recover(tp);
5712                         return;
5713                 }
5714         }
5715
5716         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5717
5718         tnapi->tx_cons = sw_idx;
5719
5720         /* Need to make the tx_cons update visible to tg3_start_xmit()
5721          * before checking for netif_queue_stopped().  Without the
5722          * memory barrier, there is a small possibility that tg3_start_xmit()
5723          * will miss it and cause the queue to be stopped forever.
5724          */
5725         smp_mb();
5726
5727         if (unlikely(netif_tx_queue_stopped(txq) &&
5728                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5729                 __netif_tx_lock(txq, smp_processor_id());
5730                 if (netif_tx_queue_stopped(txq) &&
5731                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5732                         netif_tx_wake_queue(txq);
5733                 __netif_tx_unlock(txq);
5734         }
5735 }
5736
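/* Sketch of the race the smp_mb() in tg3_tx() closes (hypothetical
 * interleaving, not driver code):
 *
 *      tg3_tx()                        xmit path
 *      --------                        ---------
 *      tnapi->tx_cons = sw_idx;        sees too few free descriptors
 *      smp_mb();                       netif_tx_stop_queue(txq);
 *      queue not yet stopped,          smp_mb();
 *      so no wakeup is issued          re-checks tg3_tx_avail()
 *
 * With the paired barriers at least one side observes the other's
 * update; the re-check under __netif_tx_lock() above closes the same
 * window from this side.
 */
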
5737 static void tg3_frag_free(bool is_frag, void *data)
5738 {
5739         if (is_frag)
5740                 put_page(virt_to_head_page(data));
5741         else
5742                 kfree(data);
5743 }
5744
5745 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5746 {
5747         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5748                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5749
5750         if (!ri->data)
5751                 return;
5752
5753         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5754                          map_sz, PCI_DMA_FROMDEVICE);
5755         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5756         ri->data = NULL;
5757 }
5758
5759
5760 /* Returns size of skb allocated or < 0 on error.
5761  *
5762  * We only need to fill in the address because the other members
5763  * of the RX descriptor are invariant, see tg3_init_rings.
5764  *
5765  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5766  * posting buffers we only dirty the first cache line of the RX
5767  * descriptor (containing the address).  Whereas for the RX status
5768  * buffers the cpu only reads the last cacheline of the RX descriptor
5769  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5770  */
5771 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5772                              u32 opaque_key, u32 dest_idx_unmasked,
5773                              unsigned int *frag_size)
5774 {
5775         struct tg3_rx_buffer_desc *desc;
5776         struct ring_info *map;
5777         u8 *data;
5778         dma_addr_t mapping;
5779         int skb_size, data_size, dest_idx;
5780
5781         switch (opaque_key) {
5782         case RXD_OPAQUE_RING_STD:
5783                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5784                 desc = &tpr->rx_std[dest_idx];
5785                 map = &tpr->rx_std_buffers[dest_idx];
5786                 data_size = tp->rx_pkt_map_sz;
5787                 break;
5788
5789         case RXD_OPAQUE_RING_JUMBO:
5790                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5791                 desc = &tpr->rx_jmb[dest_idx].std;
5792                 map = &tpr->rx_jmb_buffers[dest_idx];
5793                 data_size = TG3_RX_JMB_MAP_SZ;
5794                 break;
5795
5796         default:
5797                 return -EINVAL;
5798         }
5799
5800         /* Do not overwrite any of the map or rp information
5801          * until we are sure we can commit to a new buffer.
5802          *
5803          * Callers depend upon this behavior and assume that
5804          * we leave everything unchanged if we fail.
5805          */
5806         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5807                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5808         if (skb_size <= PAGE_SIZE) {
5809                 data = netdev_alloc_frag(skb_size);
5810                 *frag_size = skb_size;
5811         } else {
5812                 data = kmalloc(skb_size, GFP_ATOMIC);
5813                 *frag_size = 0;
5814         }
5815         if (!data)
5816                 return -ENOMEM;
5817
5818         mapping = pci_map_single(tp->pdev,
5819                                  data + TG3_RX_OFFSET(tp),
5820                                  data_size,
5821                                  PCI_DMA_FROMDEVICE);
5822         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5823                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5824                 return -EIO;
5825         }
5826
5827         map->data = data;
5828         dma_unmap_addr_set(map, mapping, mapping);
5829
5830         desc->addr_hi = ((u64)mapping >> 32);
5831         desc->addr_lo = ((u64)mapping & 0xffffffff);
5832
5833         return data_size;
5834 }
5835
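/* Worked example (illustrative numbers): for a standard ring buffer
 * with data_size around 1536 bytes, a small TG3_RX_OFFSET() and
 * 64-byte SKB_DATA_ALIGN granularity, skb_size stays well under a 4K
 * PAGE_SIZE, so the cheap netdev_alloc_frag() path is taken and
 * build_skb() can later wrap the buffer without copying.  Jumbo
 * buffers are typically the only ones that fall back to kmalloc().
 */
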
5836 /* We only need to move the address over because the other
5837  * members of the RX descriptor are invariant.  See notes above
5838  * tg3_alloc_rx_data for full details.
5839  */
5840 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5841                            struct tg3_rx_prodring_set *dpr,
5842                            u32 opaque_key, int src_idx,
5843                            u32 dest_idx_unmasked)
5844 {
5845         struct tg3 *tp = tnapi->tp;
5846         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5847         struct ring_info *src_map, *dest_map;
5848         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5849         int dest_idx;
5850
5851         switch (opaque_key) {
5852         case RXD_OPAQUE_RING_STD:
5853                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5854                 dest_desc = &dpr->rx_std[dest_idx];
5855                 dest_map = &dpr->rx_std_buffers[dest_idx];
5856                 src_desc = &spr->rx_std[src_idx];
5857                 src_map = &spr->rx_std_buffers[src_idx];
5858                 break;
5859
5860         case RXD_OPAQUE_RING_JUMBO:
5861                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5862                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5863                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5864                 src_desc = &spr->rx_jmb[src_idx].std;
5865                 src_map = &spr->rx_jmb_buffers[src_idx];
5866                 break;
5867
5868         default:
5869                 return;
5870         }
5871
5872         dest_map->data = src_map->data;
5873         dma_unmap_addr_set(dest_map, mapping,
5874                            dma_unmap_addr(src_map, mapping));
5875         dest_desc->addr_hi = src_desc->addr_hi;
5876         dest_desc->addr_lo = src_desc->addr_lo;
5877
5878         /* Ensure that the update to the skb happens after the physical
5879          * addresses have been transferred to the new BD location.
5880          */
5881         smp_wmb();
5882
5883         src_map->data = NULL;
5884 }
5885
5886 /* The RX ring scheme is composed of multiple rings which post fresh
5887  * buffers to the chip, and one special ring the chip uses to report
5888  * status back to the host.
5889  *
5890  * The special ring reports the status of received packets to the
5891  * host.  The chip does not write into the original descriptor the
5892  * RX buffer was obtained from.  The chip simply takes the original
5893  * descriptor as provided by the host, updates the status and length
5894  * field, then writes this into the next status ring entry.
5895  *
5896  * Each ring the host uses to post buffers to the chip is described
5897  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5898  * it is first placed into the on-chip RAM.  When the packet's length
5899  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
5900  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5901  * whose MAXLEN covers the new packet's length is chosen.
5902  *
5903  * The "separate ring for rx status" scheme may sound queer, but it makes
5904  * sense from a cache coherency perspective.  If only the host writes
5905  * to the buffer post rings, and only the chip writes to the rx status
5906  * rings, then cache lines never move beyond shared-modified state.
5907  * If both the host and chip were to write into the same ring, cache line
5908  * eviction could occur since both entities want it in an exclusive state.
5909  */
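/* Illustrative walk-through of one received frame (not driver code):
 *
 *   host:  posts buffer N on the standard producer ring
 *   chip:  DMAs the packet into buffer N, then writes a status ring
 *          entry carrying N as the opaque cookie plus length and
 *          error/vlan bits
 *   host:  tg3_rx() below reads the status entry, looks buffer N up
 *          in rx_std_buffers[], and posts a fresh buffer in its place
 */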
5910 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5911 {
5912         struct tg3 *tp = tnapi->tp;
5913         u32 work_mask, rx_std_posted = 0;
5914         u32 std_prod_idx, jmb_prod_idx;
5915         u32 sw_idx = tnapi->rx_rcb_ptr;
5916         u16 hw_idx;
5917         int received;
5918         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5919
5920         hw_idx = *(tnapi->rx_rcb_prod_idx);
5921         /*
5922          * We need to order the read of hw_idx and the read of
5923          * the opaque cookie.
5924          */
5925         rmb();
5926         work_mask = 0;
5927         received = 0;
5928         std_prod_idx = tpr->rx_std_prod_idx;
5929         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5930         while (sw_idx != hw_idx && budget > 0) {
5931                 struct ring_info *ri;
5932                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5933                 unsigned int len;
5934                 struct sk_buff *skb;
5935                 dma_addr_t dma_addr;
5936                 u32 opaque_key, desc_idx, *post_ptr;
5937                 u8 *data;
5938
5939                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5940                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5941                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5942                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5943                         dma_addr = dma_unmap_addr(ri, mapping);
5944                         data = ri->data;
5945                         post_ptr = &std_prod_idx;
5946                         rx_std_posted++;
5947                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5948                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5949                         dma_addr = dma_unmap_addr(ri, mapping);
5950                         data = ri->data;
5951                         post_ptr = &jmb_prod_idx;
5952                 } else
5953                         goto next_pkt_nopost;
5954
5955                 work_mask |= opaque_key;
5956
5957                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5958                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5959                 drop_it:
5960                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5961                                        desc_idx, *post_ptr);
5962                 drop_it_no_recycle:
5963                         /* Other statistics are tracked by the card. */
5964                         tp->rx_dropped++;
5965                         goto next_pkt;
5966                 }
5967
5968                 prefetch(data + TG3_RX_OFFSET(tp));
5969                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5970                       ETH_FCS_LEN;
5971
5972                 if (len > TG3_RX_COPY_THRESH(tp)) {
5973                         int skb_size;
5974                         unsigned int frag_size;
5975
5976                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5977                                                     *post_ptr, &frag_size);
5978                         if (skb_size < 0)
5979                                 goto drop_it;
5980
5981                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5982                                          PCI_DMA_FROMDEVICE);
5983
5984                         skb = build_skb(data, frag_size);
5985                         if (!skb) {
5986                                 tg3_frag_free(frag_size != 0, data);
5987                                 goto drop_it_no_recycle;
5988                         }
5989                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5990                         /* Ensure that the update to the data happens
5991                          * after the usage of the old DMA mapping.
5992                          */
5993                         smp_wmb();
5994
5995                         ri->data = NULL;
5996
5997                 } else {
5998                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5999                                        desc_idx, *post_ptr);
6000
6001                         skb = netdev_alloc_skb(tp->dev,
6002                                                len + TG3_RAW_IP_ALIGN);
6003                         if (skb == NULL)
6004                                 goto drop_it_no_recycle;
6005
6006                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6007                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6008                         memcpy(skb->data,
6009                                data + TG3_RX_OFFSET(tp),
6010                                len);
6011                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6012                 }
6013
6014                 skb_put(skb, len);
6015                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6016                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6017                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6018                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6019                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6020                 else
6021                         skb_checksum_none_assert(skb);
6022
6023                 skb->protocol = eth_type_trans(skb, tp->dev);
6024
6025                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6026                     skb->protocol != htons(ETH_P_8021Q)) {
6027                         dev_kfree_skb(skb);
6028                         goto drop_it_no_recycle;
6029                 }
6030
6031                 if (desc->type_flags & RXD_FLAG_VLAN &&
6032                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6033                         __vlan_hwaccel_put_tag(skb,
6034                                                desc->err_vlan & RXD_VLAN_MASK);
6035
6036                 napi_gro_receive(&tnapi->napi, skb);
6037
6038                 received++;
6039                 budget--;
6040
6041 next_pkt:
6042                 (*post_ptr)++;
6043
6044                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6045                         tpr->rx_std_prod_idx = std_prod_idx &
6046                                                tp->rx_std_ring_mask;
6047                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6048                                      tpr->rx_std_prod_idx);
6049                         work_mask &= ~RXD_OPAQUE_RING_STD;
6050                         rx_std_posted = 0;
6051                 }
6052 next_pkt_nopost:
6053                 sw_idx++;
6054                 sw_idx &= tp->rx_ret_ring_mask;
6055
6056                 /* Refresh hw_idx to see if there is new work */
6057                 if (sw_idx == hw_idx) {
6058                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6059                         rmb();
6060                 }
6061         }
6062
6063         /* ACK the status ring. */
6064         tnapi->rx_rcb_ptr = sw_idx;
6065         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6066
6067         /* Refill RX ring(s). */
6068         if (!tg3_flag(tp, ENABLE_RSS)) {
6069                 /* Sync BD data before updating mailbox */
6070                 wmb();
6071
6072                 if (work_mask & RXD_OPAQUE_RING_STD) {
6073                         tpr->rx_std_prod_idx = std_prod_idx &
6074                                                tp->rx_std_ring_mask;
6075                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6076                                      tpr->rx_std_prod_idx);
6077                 }
6078                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6079                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6080                                                tp->rx_jmb_ring_mask;
6081                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6082                                      tpr->rx_jmb_prod_idx);
6083                 }
6084                 mmiowb();
6085         } else if (work_mask) {
6086                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6087                  * updated before the producer indices can be updated.
6088                  */
6089                 smp_wmb();
6090
6091                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6092                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6093
6094                 if (tnapi != &tp->napi[1]) {
6095                         tp->rx_refill = true;
6096                         napi_schedule(&tp->napi[1].napi);
6097                 }
6098         }
6099
6100         return received;
6101 }
6102
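/* Worked example (illustrative): std_prod_idx advances unmasked inside
 * the loop above, so with a 512-entry ring (mask 511) a running value
 * of 515 becomes slot 3 once it is masked and written to
 * TG3_RX_STD_PROD_IDX_REG; the hardware only ever sees the masked
 * index.
 */
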
6103 static void tg3_poll_link(struct tg3 *tp)
6104 {
6105         /* handle link change and other phy events */
6106         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6107                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6108
6109                 if (sblk->status & SD_STATUS_LINK_CHG) {
6110                         sblk->status = SD_STATUS_UPDATED |
6111                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6112                         spin_lock(&tp->lock);
6113                         if (tg3_flag(tp, USE_PHYLIB)) {
6114                                 tw32_f(MAC_STATUS,
6115                                      (MAC_STATUS_SYNC_CHANGED |
6116                                       MAC_STATUS_CFG_CHANGED |
6117                                       MAC_STATUS_MI_COMPLETION |
6118                                       MAC_STATUS_LNKSTATE_CHANGED));
6119                                 udelay(40);
6120                         } else
6121                                 tg3_setup_phy(tp, 0);
6122                         spin_unlock(&tp->lock);
6123                 }
6124         }
6125 }
6126
6127 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6128                                 struct tg3_rx_prodring_set *dpr,
6129                                 struct tg3_rx_prodring_set *spr)
6130 {
6131         u32 si, di, cpycnt, src_prod_idx;
6132         int i, err = 0;
6133
6134         while (1) {
6135                 src_prod_idx = spr->rx_std_prod_idx;
6136
6137                 /* Make sure updates to the rx_std_buffers[] entries and the
6138                  * standard producer index are seen in the correct order.
6139                  */
6140                 smp_rmb();
6141
6142                 if (spr->rx_std_cons_idx == src_prod_idx)
6143                         break;
6144
6145                 if (spr->rx_std_cons_idx < src_prod_idx)
6146                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6147                 else
6148                         cpycnt = tp->rx_std_ring_mask + 1 -
6149                                  spr->rx_std_cons_idx;
6150
6151                 cpycnt = min(cpycnt,
6152                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6153
6154                 si = spr->rx_std_cons_idx;
6155                 di = dpr->rx_std_prod_idx;
6156
6157                 for (i = di; i < di + cpycnt; i++) {
6158                         if (dpr->rx_std_buffers[i].data) {
6159                                 cpycnt = i - di;
6160                                 err = -ENOSPC;
6161                                 break;
6162                         }
6163                 }
6164
6165                 if (!cpycnt)
6166                         break;
6167
6168                 /* Ensure that updates to the rx_std_buffers ring and the
6169                  * shadowed hardware producer ring from tg3_recycle_skb() are
6170                  * ordered correctly WRT the skb check above.
6171                  */
6172                 smp_rmb();
6173
6174                 memcpy(&dpr->rx_std_buffers[di],
6175                        &spr->rx_std_buffers[si],
6176                        cpycnt * sizeof(struct ring_info));
6177
6178                 for (i = 0; i < cpycnt; i++, di++, si++) {
6179                         struct tg3_rx_buffer_desc *sbd, *dbd;
6180                         sbd = &spr->rx_std[si];
6181                         dbd = &dpr->rx_std[di];
6182                         dbd->addr_hi = sbd->addr_hi;
6183                         dbd->addr_lo = sbd->addr_lo;
6184                 }
6185
6186                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6187                                        tp->rx_std_ring_mask;
6188                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6189                                        tp->rx_std_ring_mask;
6190         }
6191
6192         while (1) {
6193                 src_prod_idx = spr->rx_jmb_prod_idx;
6194
6195                 /* Make sure updates to the rx_jmb_buffers[] entries and
6196                  * the jumbo producer index are seen in the correct order.
6197                  */
6198                 smp_rmb();
6199
6200                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6201                         break;
6202
6203                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6204                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6205                 else
6206                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6207                                  spr->rx_jmb_cons_idx;
6208
6209                 cpycnt = min(cpycnt,
6210                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6211
6212                 si = spr->rx_jmb_cons_idx;
6213                 di = dpr->rx_jmb_prod_idx;
6214
6215                 for (i = di; i < di + cpycnt; i++) {
6216                         if (dpr->rx_jmb_buffers[i].data) {
6217                                 cpycnt = i - di;
6218                                 err = -ENOSPC;
6219                                 break;
6220                         }
6221                 }
6222
6223                 if (!cpycnt)
6224                         break;
6225
6226                 /* Ensure that updates to the rx_jmb_buffers ring and the
6227                  * shadowed hardware producer ring from tg3_recycle_skb() are
6228                  * ordered correctly WRT the skb check above.
6229                  */
6230                 smp_rmb();
6231
6232                 memcpy(&dpr->rx_jmb_buffers[di],
6233                        &spr->rx_jmb_buffers[si],
6234                        cpycnt * sizeof(struct ring_info));
6235
6236                 for (i = 0; i < cpycnt; i++, di++, si++) {
6237                         struct tg3_rx_buffer_desc *sbd, *dbd;
6238                         sbd = &spr->rx_jmb[si].std;
6239                         dbd = &dpr->rx_jmb[di].std;
6240                         dbd->addr_hi = sbd->addr_hi;
6241                         dbd->addr_lo = sbd->addr_lo;
6242                 }
6243
6244                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6245                                        tp->rx_jmb_ring_mask;
6246                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6247                                        tp->rx_jmb_ring_mask;
6248         }
6249
6250         return err;
6251 }
6252
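/* Worked example (illustrative): with mask 511, rx_std_cons_idx = 500
 * and rx_std_prod_idx = 10, the first pass above copies
 * 512 - 500 = 12 entries up to the wrap point and the second pass the
 * remaining 10.  Each pass is additionally clamped by the free space
 * ahead of dpr->rx_std_prod_idx and aborts with -ENOSPC if a
 * destination slot still holds data.
 */
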
6253 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6254 {
6255         struct tg3 *tp = tnapi->tp;
6256
6257         /* run TX completion thread */
6258         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6259                 tg3_tx(tnapi);
6260                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6261                         return work_done;
6262         }
6263
6264         if (!tnapi->rx_rcb_prod_idx)
6265                 return work_done;
6266
6267         /* run RX thread, within the bounds set by NAPI.
6268          * All RX "locking" is done by ensuring outside
6269          * code synchronizes with tg3->napi.poll()
6270          */
6271         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6272                 work_done += tg3_rx(tnapi, budget - work_done);
6273
6274         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6275                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6276                 int i, err = 0;
6277                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6278                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6279
6280                 tp->rx_refill = false;
6281                 for (i = 1; i < tp->irq_cnt; i++)
6282                         err |= tg3_rx_prodring_xfer(tp, dpr,
6283                                                     &tp->napi[i].prodring);
6284
6285                 wmb();
6286
6287                 if (std_prod_idx != dpr->rx_std_prod_idx)
6288                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6289                                      dpr->rx_std_prod_idx);
6290
6291                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6292                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6293                                      dpr->rx_jmb_prod_idx);
6294
6295                 mmiowb();
6296
6297                 if (err)
6298                         tw32_f(HOSTCC_MODE, tp->coal_now);
6299         }
6300
6301         return work_done;
6302 }
6303
6304 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6305 {
6306         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6307                 schedule_work(&tp->reset_task);
6308 }
6309
6310 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6311 {
6312         cancel_work_sync(&tp->reset_task);
6313         tg3_flag_clear(tp, RESET_TASK_PENDING);
6314         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6315 }
6316
6317 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6318 {
6319         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6320         struct tg3 *tp = tnapi->tp;
6321         int work_done = 0;
6322         struct tg3_hw_status *sblk = tnapi->hw_status;
6323
6324         while (1) {
6325                 work_done = tg3_poll_work(tnapi, work_done, budget);
6326
6327                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6328                         goto tx_recovery;
6329
6330                 if (unlikely(work_done >= budget))
6331                         break;
6332
6333                 /* tp->last_tag is used in tg3_int_reenable() below
6334                  * to tell the hw how much work has been processed,
6335                  * so we must read it before checking for more work.
6336                  */
6337                 tnapi->last_tag = sblk->status_tag;
6338                 tnapi->last_irq_tag = tnapi->last_tag;
6339                 rmb();
6340
6341                 /* check for RX/TX work to do */
6342                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6343                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6344
6345                         /* This test here is not race free, but will reduce
6346                          * the number of interrupts by looping again.
6347                          */
6348                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6349                                 continue;
6350
6351                         napi_complete(napi);
6352                         /* Reenable interrupts. */
6353                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6354
6355                         /* This test here is synchronized by napi_schedule()
6356                          * and napi_complete() to close the race condition.
6357                          */
6358                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6359                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6360                                                   HOSTCC_MODE_ENABLE |
6361                                                   tnapi->coal_now);
6362                         }
6363                         mmiowb();
6364                         break;
6365                 }
6366         }
6367
6368         return work_done;
6369
6370 tx_recovery:
6371         /* work_done is guaranteed to be less than budget. */
6372         napi_complete(napi);
6373         tg3_reset_task_schedule(tp);
6374         return work_done;
6375 }
6376
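/* Note on the interrupt mailbox write above (a sketch of the
 * tagged-status scheme as this driver uses it): last_tag << 24 places
 * the status tag in the high byte of the mailbox, which re-enables the
 * vector and tells the chip how far processing has advanced; a new
 * interrupt is only raised while the chip's current tag differs from
 * the value written back.
 */
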
6377 static void tg3_process_error(struct tg3 *tp)
6378 {
6379         u32 val;
6380         bool real_error = false;
6381
6382         if (tg3_flag(tp, ERROR_PROCESSED))
6383                 return;
6384
6385         /* Check Flow Attention register */
6386         val = tr32(HOSTCC_FLOW_ATTN);
6387         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6388                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6389                 real_error = true;
6390         }
6391
6392         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6393                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6394                 real_error = true;
6395         }
6396
6397         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6398                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6399                 real_error = true;
6400         }
6401
6402         if (!real_error)
6403                 return;
6404
6405         tg3_dump_state(tp);
6406
6407         tg3_flag_set(tp, ERROR_PROCESSED);
6408         tg3_reset_task_schedule(tp);
6409 }
6410
6411 static int tg3_poll(struct napi_struct *napi, int budget)
6412 {
6413         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6414         struct tg3 *tp = tnapi->tp;
6415         int work_done = 0;
6416         struct tg3_hw_status *sblk = tnapi->hw_status;
6417
6418         while (1) {
6419                 if (sblk->status & SD_STATUS_ERROR)
6420                         tg3_process_error(tp);
6421
6422                 tg3_poll_link(tp);
6423
6424                 work_done = tg3_poll_work(tnapi, work_done, budget);
6425
6426                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6427                         goto tx_recovery;
6428
6429                 if (unlikely(work_done >= budget))
6430                         break;
6431
6432                 if (tg3_flag(tp, TAGGED_STATUS)) {
6433                         /* tp->last_tag is used in tg3_int_reenable() below
6434                          * to tell the hw how much work has been processed,
6435                          * so we must read it before checking for more work.
6436                          */
6437                         tnapi->last_tag = sblk->status_tag;
6438                         tnapi->last_irq_tag = tnapi->last_tag;
6439                         rmb();
6440                 } else
6441                         sblk->status &= ~SD_STATUS_UPDATED;
6442
6443                 if (likely(!tg3_has_work(tnapi))) {
6444                         napi_complete(napi);
6445                         tg3_int_reenable(tnapi);
6446                         break;
6447                 }
6448         }
6449
6450         return work_done;
6451
6452 tx_recovery:
6453         /* work_done is guaranteed to be less than budget. */
6454         napi_complete(napi);
6455         tg3_reset_task_schedule(tp);
6456         return work_done;
6457 }
6458
6459 static void tg3_napi_disable(struct tg3 *tp)
6460 {
6461         int i;
6462
6463         for (i = tp->irq_cnt - 1; i >= 0; i--)
6464                 napi_disable(&tp->napi[i].napi);
6465 }
6466
6467 static void tg3_napi_enable(struct tg3 *tp)
6468 {
6469         int i;
6470
6471         for (i = 0; i < tp->irq_cnt; i++)
6472                 napi_enable(&tp->napi[i].napi);
6473 }
6474
6475 static void tg3_napi_init(struct tg3 *tp)
6476 {
6477         int i;
6478
6479         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6480         for (i = 1; i < tp->irq_cnt; i++)
6481                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6482 }
6483
6484 static void tg3_napi_fini(struct tg3 *tp)
6485 {
6486         int i;
6487
6488         for (i = 0; i < tp->irq_cnt; i++)
6489                 netif_napi_del(&tp->napi[i].napi);
6490 }
6491
6492 static inline void tg3_netif_stop(struct tg3 *tp)
6493 {
6494         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6495         tg3_napi_disable(tp);
6496         netif_tx_disable(tp->dev);
6497 }
6498
6499 static inline void tg3_netif_start(struct tg3 *tp)
6500 {
6501         /* NOTE: unconditional netif_tx_wake_all_queues is only
6502          * appropriate so long as all callers are assured to
6503          * have free tx slots (such as after tg3_init_hw)
6504          */
6505         netif_tx_wake_all_queues(tp->dev);
6506
6507         tg3_napi_enable(tp);
6508         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6509         tg3_enable_ints(tp);
6510 }
6511
6512 static void tg3_irq_quiesce(struct tg3 *tp)
6513 {
6514         int i;
6515
6516         BUG_ON(tp->irq_sync);
6517
6518         tp->irq_sync = 1;
6519         smp_mb();
6520
6521         for (i = 0; i < tp->irq_cnt; i++)
6522                 synchronize_irq(tp->napi[i].irq_vec);
6523 }
6524
6525 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6526  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6527  * with as well.  Most of the time, this is not necessary except when
6528  * shutting down the device.
6529  */
6530 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6531 {
6532         spin_lock_bh(&tp->lock);
6533         if (irq_sync)
6534                 tg3_irq_quiesce(tp);
6535 }
6536
6537 static inline void tg3_full_unlock(struct tg3 *tp)
6538 {
6539         spin_unlock_bh(&tp->lock);
6540 }
6541
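/* Typical usage elsewhere in the driver (sketch):
 *
 *      tg3_full_lock(tp, 1);   quiesce IRQs too, e.g. when closing
 *      ... reconfigure or halt the hardware ...
 *      tg3_full_unlock(tp);
 *
 * irq_sync = 0 is the common fast path when the caller only needs
 * mutual exclusion with the timer and other BH users of tp->lock.
 */
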
6542 /* One-shot MSI handler - Chip automatically disables the interrupt
6543  * after sending the MSI, so the driver doesn't have to do it.
6544  */
6545 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6546 {
6547         struct tg3_napi *tnapi = dev_id;
6548         struct tg3 *tp = tnapi->tp;
6549
6550         prefetch(tnapi->hw_status);
6551         if (tnapi->rx_rcb)
6552                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6553
6554         if (likely(!tg3_irq_sync(tp)))
6555                 napi_schedule(&tnapi->napi);
6556
6557         return IRQ_HANDLED;
6558 }
6559
6560 /* MSI ISR - No need to check for interrupt sharing and no need to
6561  * flush status block and interrupt mailbox. PCI ordering rules
6562  * guarantee that MSI will arrive after the status block.
6563  */
6564 static irqreturn_t tg3_msi(int irq, void *dev_id)
6565 {
6566         struct tg3_napi *tnapi = dev_id;
6567         struct tg3 *tp = tnapi->tp;
6568
6569         prefetch(tnapi->hw_status);
6570         if (tnapi->rx_rcb)
6571                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6572         /*
6573          * Writing any value to intr-mbox-0 clears PCI INTA# and
6574          * chip-internal interrupt pending events.
6575          * Writing non-zero to intr-mbox-0 additionally tells the
6576          * NIC to stop sending us irqs, engaging "in-intr-handler"
6577          * event coalescing.
6578          */
6579         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6580         if (likely(!tg3_irq_sync(tp)))
6581                 napi_schedule(&tnapi->napi);
6582
6583         return IRQ_RETVAL(1);
6584 }
6585
6586 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6587 {
6588         struct tg3_napi *tnapi = dev_id;
6589         struct tg3 *tp = tnapi->tp;
6590         struct tg3_hw_status *sblk = tnapi->hw_status;
6591         unsigned int handled = 1;
6592
6593         /* In INTx mode, it is possible for the interrupt to arrive at
6594          * the CPU before the status block write that preceded it has landed.
6595          * Reading the PCI State register will confirm whether the
6596          * interrupt is ours and will flush the status block.
6597          */
6598         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6599                 if (tg3_flag(tp, CHIP_RESETTING) ||
6600                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6601                         handled = 0;
6602                         goto out;
6603                 }
6604         }
6605
6606         /*
6607          * Writing any value to intr-mbox-0 clears PCI INTA# and
6608          * chip-internal interrupt pending events.
6609          * Writing non-zero to intr-mbox-0 additionally tells the
6610          * NIC to stop sending us irqs, engaging "in-intr-handler"
6611          * event coalescing.
6612          *
6613          * Flush the mailbox to de-assert the IRQ immediately to prevent
6614          * spurious interrupts.  The flush impacts performance but
6615          * excessive spurious interrupts can be worse in some cases.
6616          */
6617         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6618         if (tg3_irq_sync(tp))
6619                 goto out;
6620         sblk->status &= ~SD_STATUS_UPDATED;
6621         if (likely(tg3_has_work(tnapi))) {
6622                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6623                 napi_schedule(&tnapi->napi);
6624         } else {
6625                 /* No work, shared interrupt perhaps?  re-enable
6626                  * interrupts, and flush that PCI write
6627                  */
6628                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6629                                0x00000000);
6630         }
6631 out:
6632         return IRQ_RETVAL(handled);
6633 }
6634
6635 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6636 {
6637         struct tg3_napi *tnapi = dev_id;
6638         struct tg3 *tp = tnapi->tp;
6639         struct tg3_hw_status *sblk = tnapi->hw_status;
6640         unsigned int handled = 1;
6641
6642         /* In INTx mode, it is possible for the interrupt to arrive at
6643          * the CPU before the status block write that preceded it has landed.
6644          * Reading the PCI State register will confirm whether the
6645          * interrupt is ours and will flush the status block.
6646          */
6647         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6648                 if (tg3_flag(tp, CHIP_RESETTING) ||
6649                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6650                         handled = 0;
6651                         goto out;
6652                 }
6653         }
6654
6655         /*
6656          * Writing any value to intr-mbox-0 clears PCI INTA# and
6657          * chip-internal interrupt pending events.
6658          * Writing non-zero to intr-mbox-0 additionally tells the
6659          * NIC to stop sending us irqs, engaging "in-intr-handler"
6660          * event coalescing.
6661          *
6662          * Flush the mailbox to de-assert the IRQ immediately to prevent
6663          * spurious interrupts.  The flush impacts performance but
6664          * excessive spurious interrupts can be worse in some cases.
6665          */
6666         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6667
6668         /*
6669          * In a shared interrupt configuration, sometimes other devices'
6670          * interrupts will scream.  We record the current status tag here
6671          * so that the above check can report that the screaming interrupts
6672          * are unhandled.  Eventually they will be silenced.
6673          */
6674         tnapi->last_irq_tag = sblk->status_tag;
6675
6676         if (tg3_irq_sync(tp))
6677                 goto out;
6678
6679         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6680
6681         napi_schedule(&tnapi->napi);
6682
6683 out:
6684         return IRQ_RETVAL(handled);
6685 }
6686
6687 /* ISR for interrupt test */
6688 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6689 {
6690         struct tg3_napi *tnapi = dev_id;
6691         struct tg3 *tp = tnapi->tp;
6692         struct tg3_hw_status *sblk = tnapi->hw_status;
6693
6694         if ((sblk->status & SD_STATUS_UPDATED) ||
6695             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6696                 tg3_disable_ints(tp);
6697                 return IRQ_RETVAL(1);
6698         }
6699         return IRQ_RETVAL(0);
6700 }
6701
6702 #ifdef CONFIG_NET_POLL_CONTROLLER
6703 static void tg3_poll_controller(struct net_device *dev)
6704 {
6705         int i;
6706         struct tg3 *tp = netdev_priv(dev);
6707
6708         for (i = 0; i < tp->irq_cnt; i++)
6709                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6710 }
6711 #endif
6712
6713 static void tg3_tx_timeout(struct net_device *dev)
6714 {
6715         struct tg3 *tp = netdev_priv(dev);
6716
6717         if (netif_msg_tx_err(tp)) {
6718                 netdev_err(dev, "transmit timed out, resetting\n");
6719                 tg3_dump_state(tp);
6720         }
6721
6722         tg3_reset_task_schedule(tp);
6723 }
6724
6725 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
6726 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6727 {
6728         u32 base = (u32) mapping & 0xffffffff;
6729
6730         return (base > 0xffffdcc0) && (base + len + 8 < base);
6731 }
6732
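/* Worked example (illustrative): mapping = 0xffffe000, len = 0x3000.
 * base = 0xffffe000 > 0xffffdcc0, and base + len + 8 = 0x100001008
 * truncates to 0x1008 in 32-bit arithmetic, which is < base, so the
 * buffer straddles the 4GB boundary and the test returns true.
 */
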
6733 /* Test for DMA addresses > 40-bit */
6734 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6735                                           int len)
6736 {
6737 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6738         if (tg3_flag(tp, 40BIT_DMA_BUG))
6739                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6740         return 0;
6741 #else
6742         return 0;
6743 #endif
6744 }
6745
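/* Worked example (illustrative): with the 40BIT_DMA_BUG flag set,
 * mapping = 0xfffffff000 and len = 0x2000 give an end address of
 * 0x10000001000, which exceeds DMA_BIT_MASK(40) = 0xffffffffff, so
 * the test returns true and the workaround path is taken.
 */
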
6746 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6747                                  dma_addr_t mapping, u32 len, u32 flags,
6748                                  u32 mss, u32 vlan)
6749 {
6750         txbd->addr_hi = ((u64) mapping >> 32);
6751         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6752         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6753         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6754 }
6755
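/* Descriptor packing, as the masks above suggest (assuming
 * TXD_LEN_SHIFT is 16): len lands in the upper 16 bits of len_flags
 * and the TXD flags in the lower 16, while vlan_tag packs the MSS and
 * VLAN tag into one 32-bit word.  A 1514-byte final fragment would be
 * written as (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END.
 */
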
6756 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6757                             dma_addr_t map, u32 len, u32 flags,
6758                             u32 mss, u32 vlan)
6759 {
6760         struct tg3 *tp = tnapi->tp;
6761         bool hwbug = false;
6762
6763         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6764                 hwbug = true;
6765
6766         if (tg3_4g_overflow_test(map, len))
6767                 hwbug = true;
6768
6769         if (tg3_40bit_overflow_test(tp, map, len))
6770                 hwbug = true;
6771
6772         if (tp->dma_limit) {
6773                 u32 prvidx = *entry;
6774                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6775                 while (len > tp->dma_limit && *budget) {
6776                         u32 frag_len = tp->dma_limit;
6777                         len -= tp->dma_limit;
6778
6779                         /* Avoid the 8-byte DMA problem */
6780                         if (len <= 8) {
6781                                 len += tp->dma_limit / 2;
6782                                 frag_len = tp->dma_limit / 2;
6783                         }
6784
6785                         tnapi->tx_buffers[*entry].fragmented = true;
6786
6787                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6788                                       frag_len, tmp_flag, mss, vlan);
6789                         *budget -= 1;
6790                         prvidx = *entry;
6791                         *entry = NEXT_TX(*entry);
6792
6793                         map += frag_len;
6794                 }
6795
6796                 if (len) {
6797                         if (*budget) {
6798                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6799                                               len, flags, mss, vlan);
6800                                 *budget -= 1;
6801                                 *entry = NEXT_TX(*entry);
6802                         } else {
6803                                 hwbug = true;
6804                                 tnapi->tx_buffers[prvidx].fragmented = false;
6805                         }
6806                 }
6807         } else {
6808                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6809                               len, flags, mss, vlan);
6810                 *entry = NEXT_TX(*entry);
6811         }
6812
6813         return hwbug;
6814 }
6815
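/* Worked example (illustrative, assuming tp->dma_limit = 4096): a
 * len of 8200 is emitted as three BDs of 4096, 2048 and 2056 bytes.
 * The second iteration trips the len <= 8 adjustment: after two full
 * 4096-byte chunks only 8 bytes would remain, so half of dma_limit is
 * handed back to the tail, avoiding a BD shorter than 8 bytes.
 */
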
6816 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6817 {
6818         int i;
6819         struct sk_buff *skb;
6820         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6821
6822         skb = txb->skb;
6823         txb->skb = NULL;
6824
6825         pci_unmap_single(tnapi->tp->pdev,
6826                          dma_unmap_addr(txb, mapping),
6827                          skb_headlen(skb),
6828                          PCI_DMA_TODEVICE);
6829
6830         while (txb->fragmented) {
6831                 txb->fragmented = false;
6832                 entry = NEXT_TX(entry);
6833                 txb = &tnapi->tx_buffers[entry];
6834         }
6835
6836         for (i = 0; i <= last; i++) {
6837                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6838
6839                 entry = NEXT_TX(entry);
6840                 txb = &tnapi->tx_buffers[entry];
6841
6842                 pci_unmap_page(tnapi->tp->pdev,
6843                                dma_unmap_addr(txb, mapping),
6844                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6845
6846                 while (txb->fragmented) {
6847                         txb->fragmented = false;
6848                         entry = NEXT_TX(entry);
6849                         txb = &tnapi->tx_buffers[entry];
6850                 }
6851         }
6852 }
6853
6854 /* Work around the 4GB and 40-bit hardware DMA bugs. */
6855 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6856                                        struct sk_buff **pskb,
6857                                        u32 *entry, u32 *budget,
6858                                        u32 base_flags, u32 mss, u32 vlan)
6859 {
6860         struct tg3 *tp = tnapi->tp;
6861         struct sk_buff *new_skb, *skb = *pskb;
6862         dma_addr_t new_addr = 0;
6863         int ret = 0;
6864
6865         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6866                 new_skb = skb_copy(skb, GFP_ATOMIC);
6867         else {
6868                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6869
6870                 new_skb = skb_copy_expand(skb,
6871                                           skb_headroom(skb) + more_headroom,
6872                                           skb_tailroom(skb), GFP_ATOMIC);
6873         }
6874
6875         if (!new_skb) {
6876                 ret = -1;
6877         } else {
6878                 /* New SKB is guaranteed to be linear. */
6879                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6880                                           PCI_DMA_TODEVICE);
6881                 /* Make sure the mapping succeeded */
6882                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6883                         dev_kfree_skb(new_skb);
6884                         ret = -1;
6885                 } else {
6886                         u32 save_entry = *entry;
6887
6888                         base_flags |= TXD_FLAG_END;
6889
6890                         tnapi->tx_buffers[*entry].skb = new_skb;
6891                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6892                                            mapping, new_addr);
6893
6894                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6895                                             new_skb->len, base_flags,
6896                                             mss, vlan)) {
6897                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6898                                 dev_kfree_skb(new_skb);
6899                                 ret = -1;
6900                         }
6901                 }
6902         }
6903
6904         dev_kfree_skb(skb);
6905         *pskb = new_skb;
6906         return ret;
6907 }
6908
6909 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6910
6911 /* Use GSO to work around a rare TSO bug that may be triggered when the
6912  * TSO header is greater than 80 bytes.
6913  */
6914 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6915 {
6916         struct sk_buff *segs, *nskb;
6917         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6918
6919         /* Estimate the number of fragments in the worst case */
6920         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6921                 netif_stop_queue(tp->dev);
6922
6923                 /* netif_tx_stop_queue() must be done before checking
6924                  * the tx index in tg3_tx_avail() below, because in
6925                  * tg3_tx(), we update tx index before checking for
6926                  * netif_tx_queue_stopped().
6927                  */
6928                 smp_mb();
6929                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6930                         return NETDEV_TX_BUSY;
6931
6932                 netif_wake_queue(tp->dev);
6933         }
6934
6935         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6936         if (IS_ERR(segs))
6937                 goto tg3_tso_bug_end;
6938
6939         do {
6940                 nskb = segs;
6941                 segs = segs->next;
6942                 nskb->next = NULL;
6943                 tg3_start_xmit(nskb, tp->dev);
6944         } while (segs);
6945
6946 tg3_tso_bug_end:
6947         dev_kfree_skb(skb);
6948
6949         return NETDEV_TX_OK;
6950 }
6951
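/* Sizing note (illustrative): frag_cnt_est = gso_segs * 3 reserves
 * roughly three descriptors per resulting segment (header plus data,
 * with slack for the splitting in tg3_tx_frag_set()), so a 10-segment
 * GSO skb needs about 30 free entries before its segments are fed
 * back through tg3_start_xmit() one at a time.
 */
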
6952 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6953  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6954  */
6955 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6956 {
6957         struct tg3 *tp = netdev_priv(dev);
6958         u32 len, entry, base_flags, mss, vlan = 0;
6959         u32 budget;
6960         int i = -1, would_hit_hwbug;
6961         dma_addr_t mapping;
6962         struct tg3_napi *tnapi;
6963         struct netdev_queue *txq;
6964         unsigned int last;
6965
6966         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6967         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6968         if (tg3_flag(tp, ENABLE_TSS))
6969                 tnapi++;
6970
6971         budget = tg3_tx_avail(tnapi);
6972
6973         /* We are running in BH disabled context with netif_tx_lock
6974          * and TX reclaim runs via tp->napi.poll inside of a software
6975          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6976          * no IRQ context deadlocks to worry about either.  Rejoice!
6977          */
6978         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6979                 if (!netif_tx_queue_stopped(txq)) {
6980                         netif_tx_stop_queue(txq);
6981
6982                         /* This is a hard error, log it. */
6983                         netdev_err(dev,
6984                                    "BUG! Tx Ring full when queue awake!\n");
6985                 }
6986                 return NETDEV_TX_BUSY;
6987         }
6988
6989         entry = tnapi->tx_prod;
6990         base_flags = 0;
6991         if (skb->ip_summed == CHECKSUM_PARTIAL)
6992                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6993
6994         mss = skb_shinfo(skb)->gso_size;
6995         if (mss) {
6996                 struct iphdr *iph;
6997                 u32 tcp_opt_len, hdr_len;
6998
6999                 if (skb_header_cloned(skb) &&
7000                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7001                         goto drop;
7002
7003                 iph = ip_hdr(skb);
7004                 tcp_opt_len = tcp_optlen(skb);
7005
7006                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7007
7008                 if (!skb_is_gso_v6(skb)) {
7009                         iph->check = 0;
7010                         iph->tot_len = htons(mss + hdr_len);
7011                 }
7012
7013                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7014                     tg3_flag(tp, TSO_BUG))
7015                         return tg3_tso_bug(tp, skb);
7016
7017                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7018                                TXD_FLAG_CPU_POST_DMA);
7019
7020                 if (tg3_flag(tp, HW_TSO_1) ||
7021                     tg3_flag(tp, HW_TSO_2) ||
7022                     tg3_flag(tp, HW_TSO_3)) {
7023                         tcp_hdr(skb)->check = 0;
7024                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7025                 } else
7026                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7027                                                                  iph->daddr, 0,
7028                                                                  IPPROTO_TCP,
7029                                                                  0);
7030
7031                 if (tg3_flag(tp, HW_TSO_3)) {
7032                         mss |= (hdr_len & 0xc) << 12;
7033                         if (hdr_len & 0x10)
7034                                 base_flags |= 0x00000010;
7035                         base_flags |= (hdr_len & 0x3e0) << 5;
7036                 } else if (tg3_flag(tp, HW_TSO_2))
7037                         mss |= hdr_len << 9;
7038                 else if (tg3_flag(tp, HW_TSO_1) ||
7039                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7040                         if (tcp_opt_len || iph->ihl > 5) {
7041                                 int tsflags;
7042
7043                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7044                                 mss |= (tsflags << 11);
7045                         }
7046                 } else {
7047                         if (tcp_opt_len || iph->ihl > 5) {
7048                                 int tsflags;
7049
7050                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7051                                 base_flags |= tsflags << 12;
7052                         }
7053                 }
7054         }
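        /* For the hardware TSO variants above, the IP + TCP header length
         * is folded into otherwise-unused descriptor bits.  Illustrative
         * HW_TSO_2 case (assuming 20-byte IP and TCP headers, no options):
         * hdr_len = 14 + 20 + 20 - ETH_HLEN = 40, so the descriptor mss
         * word becomes mss | (40 << 9), letting the chip replicate the
         * headers for each segment on its own.
         */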
7055
7056         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7057             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7058                 base_flags |= TXD_FLAG_JMB_PKT;
7059
7060         if (vlan_tx_tag_present(skb)) {
7061                 base_flags |= TXD_FLAG_VLAN;
7062                 vlan = vlan_tx_tag_get(skb);
7063         }
7064
7065         len = skb_headlen(skb);
7066
7067         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7068         if (pci_dma_mapping_error(tp->pdev, mapping))
7069                 goto drop;
7070
7072         tnapi->tx_buffers[entry].skb = skb;
7073         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7074
7075         would_hit_hwbug = 0;
7076
7077         if (tg3_flag(tp, 5701_DMA_BUG))
7078                 would_hit_hwbug = 1;
7079
7080         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7081                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7082                             mss, vlan)) {
7083                 would_hit_hwbug = 1;
7084         } else if (skb_shinfo(skb)->nr_frags > 0) {
7085                 u32 tmp_mss = mss;
7086
7087                 if (!tg3_flag(tp, HW_TSO_1) &&
7088                     !tg3_flag(tp, HW_TSO_2) &&
7089                     !tg3_flag(tp, HW_TSO_3))
7090                         tmp_mss = 0;
7091
7092                 /* Now loop through additional data
7093                  * fragments, and queue them.
7094                  */
7095                 last = skb_shinfo(skb)->nr_frags - 1;
7096                 for (i = 0; i <= last; i++) {
7097                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7098
7099                         len = skb_frag_size(frag);
7100                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7101                                                    len, DMA_TO_DEVICE);
7102
7103                         tnapi->tx_buffers[entry].skb = NULL;
7104                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7105                                            mapping);
7106                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7107                                 goto dma_error;
7108
7109                         if (!budget ||
7110                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7111                                             len, base_flags |
7112                                             ((i == last) ? TXD_FLAG_END : 0),
7113                                             tmp_mss, vlan)) {
7114                                 would_hit_hwbug = 1;
7115                                 break;
7116                         }
7117                 }
7118         }
7119
7120         if (would_hit_hwbug) {
7121                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7122
7123                 /* If the workaround fails due to memory/mapping
7124                  * failure, silently drop this packet.
7125                  */
7126                 entry = tnapi->tx_prod;
7127                 budget = tg3_tx_avail(tnapi);
7128                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7129                                                 base_flags, mss, vlan))
7130                         goto drop_nofree;
7131         }
7132
7133         skb_tx_timestamp(skb);
7134         netdev_tx_sent_queue(txq, skb->len);
7135
7136         /* Sync BD data before updating mailbox */
7137         wmb();
7138
7139         /* Packets are ready; update the Tx producer idx locally and on card. */
7140         tw32_tx_mbox(tnapi->prodmbox, entry);
7141
7142         tnapi->tx_prod = entry;
7143         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7144                 netif_tx_stop_queue(txq);
7145
7146                 /* netif_tx_stop_queue() must be done before checking
7147                  * the tx index in tg3_tx_avail() below, because in
7148                  * tg3_tx(), we update the tx index before checking for
7149                  * netif_tx_queue_stopped().
7150                  */
7151                 smp_mb();
7152                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7153                         netif_tx_wake_queue(txq);
7154         }
7155
7156         mmiowb();
7157         return NETDEV_TX_OK;
7158
7159 dma_error:
7160         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7161         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7162 drop:
7163         dev_kfree_skb(skb);
7164 drop_nofree:
7165         tp->tx_dropped++;
7166         return NETDEV_TX_OK;
7167 }
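/* The smp_mb() in the stop path above pairs with a matching barrier on
 * the reclaim side.  A minimal sketch of that pairing, modeled on the
 * tg3_tx() completion path (illustrative, not a verbatim copy):
 *
 *	tnapi->tx_cons = sw_idx;
 *	smp_mb();	// publish tx_cons before testing the stopped bit
 *	if (netif_tx_queue_stopped(txq) &&
 *	    tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *		netif_tx_wake_queue(txq);
 *
 * Without both barriers, the producer could stop the queue just after
 * the consumer's last avail check, stalling the queue forever.
 */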
7168
7169 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7170 {
7171         if (enable) {
7172                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7173                                   MAC_MODE_PORT_MODE_MASK);
7174
7175                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7176
7177                 if (!tg3_flag(tp, 5705_PLUS))
7178                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7179
7180                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7181                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7182                 else
7183                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7184         } else {
7185                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7186
7187                 if (tg3_flag(tp, 5705_PLUS) ||
7188                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7189                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7190                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7191         }
7192
7193         tw32(MAC_MODE, tp->mac_mode);
7194         udelay(40);
7195 }
7196
7197 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7198 {
7199         u32 val, bmcr, mac_mode, ptest = 0;
7200
7201         tg3_phy_toggle_apd(tp, false);
7202         tg3_phy_toggle_automdix(tp, 0);
7203
7204         if (extlpbk && tg3_phy_set_extloopbk(tp))
7205                 return -EIO;
7206
7207         bmcr = BMCR_FULLDPLX;
7208         switch (speed) {
7209         case SPEED_10:
7210                 break;
7211         case SPEED_100:
7212                 bmcr |= BMCR_SPEED100;
7213                 break;
7214         case SPEED_1000:
7215         default:
7216                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7217                         speed = SPEED_100;
7218                         bmcr |= BMCR_SPEED100;
7219                 } else {
7220                         speed = SPEED_1000;
7221                         bmcr |= BMCR_SPEED1000;
7222                 }
7223         }
7224
7225         if (extlpbk) {
7226                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7227                         tg3_readphy(tp, MII_CTRL1000, &val);
7228                         val |= CTL1000_AS_MASTER |
7229                                CTL1000_ENABLE_MASTER;
7230                         tg3_writephy(tp, MII_CTRL1000, val);
7231                 } else {
7232                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7233                                 MII_TG3_FET_PTEST_TRIM_2;
7234                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7235                 }
7236         } else
7237                 bmcr |= BMCR_LOOPBACK;
7238
7239         tg3_writephy(tp, MII_BMCR, bmcr);
7240
7241         /* The write needs to be flushed for the FETs */
7242         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7243                 tg3_readphy(tp, MII_BMCR, &bmcr);
7244
7245         udelay(40);
7246
7247         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7248             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7249                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7250                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7251                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7252
7253                 /* The write needs to be flushed for the AC131 */
7254                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7255         }
7256
7257         /* Reset to prevent intermittently losing the first rx packet */
7258         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7259             tg3_flag(tp, 5780_CLASS)) {
7260                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7261                 udelay(10);
7262                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7263         }
7264
7265         mac_mode = tp->mac_mode &
7266                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7267         if (speed == SPEED_1000)
7268                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7269         else
7270                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7271
7272         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7273                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7274
7275                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7276                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7277                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7278                         mac_mode |= MAC_MODE_LINK_POLARITY;
7279
7280                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7281                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7282         }
7283
7284         tw32(MAC_MODE, mac_mode);
7285         udelay(40);
7286
7287         return 0;
7288 }
7289
7290 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7291 {
7292         struct tg3 *tp = netdev_priv(dev);
7293
7294         if (features & NETIF_F_LOOPBACK) {
7295                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7296                         return;
7297
7298                 spin_lock_bh(&tp->lock);
7299                 tg3_mac_loopback(tp, true);
7300                 netif_carrier_on(tp->dev);
7301                 spin_unlock_bh(&tp->lock);
7302                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7303         } else {
7304                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7305                         return;
7306
7307                 spin_lock_bh(&tp->lock);
7308                 tg3_mac_loopback(tp, false);
7309                 /* Force link status check */
7310                 tg3_setup_phy(tp, 1);
7311                 spin_unlock_bh(&tp->lock);
7312                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7313         }
7314 }
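/* NETIF_F_LOOPBACK is normally toggled from user space through the
 * netdev feature interface; with ethtool this would be (assuming the
 * interface is named eth0):
 *
 *	ethtool -K eth0 loopback on
 *	ethtool -K eth0 loopback off
 */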
7315
7316 static netdev_features_t tg3_fix_features(struct net_device *dev,
7317         netdev_features_t features)
7318 {
7319         struct tg3 *tp = netdev_priv(dev);
7320
7321         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7322                 features &= ~NETIF_F_ALL_TSO;
7323
7324         return features;
7325 }
7326
7327 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7328 {
7329         netdev_features_t changed = dev->features ^ features;
7330
7331         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7332                 tg3_set_loopback(dev, features);
7333
7334         return 0;
7335 }
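/* The XOR above yields exactly the feature bits that changed, so only
 * transitions are acted on.  For example, going from (A | B) to (B | C)
 * gives changed = A | C: A was cleared and C was set, while B needs no
 * action.
 */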
7336
7337 static void tg3_rx_prodring_free(struct tg3 *tp,
7338                                  struct tg3_rx_prodring_set *tpr)
7339 {
7340         int i;
7341
7342         if (tpr != &tp->napi[0].prodring) {
7343                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7344                      i = (i + 1) & tp->rx_std_ring_mask)
7345                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7346                                         tp->rx_pkt_map_sz);
7347
7348                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7349                         for (i = tpr->rx_jmb_cons_idx;
7350                              i != tpr->rx_jmb_prod_idx;
7351                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7352                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7353                                                 TG3_RX_JMB_MAP_SZ);
7354                         }
7355                 }
7356
7357                 return;
7358         }
7359
7360         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7361                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7362                                 tp->rx_pkt_map_sz);
7363
7364         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7365                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7366                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7367                                         TG3_RX_JMB_MAP_SZ);
7368         }
7369 }
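/* The "(i + 1) & ring_mask" stepping above works because the ring sizes
 * are powers of two, so the mask wraps the index without a modulo.  A
 * minimal sketch with a hypothetical 512-entry ring (mask 0x1ff):
 *
 *	u32 i = 511;
 *	i = (i + 1) & 0x1ff;	// i == 0, wrapped back to the start
 */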
7370
7371 /* Initialize rx rings for packet processing.
7372  *
7373  * The chip has been shut down and the driver detached from
7374  * the network stack, so no interrupts or new tx packets will
7375  * end up in the driver.  tp->{tx,}lock are held and thus
7376  * we may not sleep.
7377  */
7378 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7379                                  struct tg3_rx_prodring_set *tpr)
7380 {
7381         u32 i, rx_pkt_dma_sz;
7382
7383         tpr->rx_std_cons_idx = 0;
7384         tpr->rx_std_prod_idx = 0;
7385         tpr->rx_jmb_cons_idx = 0;
7386         tpr->rx_jmb_prod_idx = 0;
7387
7388         if (tpr != &tp->napi[0].prodring) {
7389                 memset(&tpr->rx_std_buffers[0], 0,
7390                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7391                 if (tpr->rx_jmb_buffers)
7392                         memset(&tpr->rx_jmb_buffers[0], 0,
7393                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7394                 goto done;
7395         }
7396
7397         /* Zero out all descriptors. */
7398         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7399
7400         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7401         if (tg3_flag(tp, 5780_CLASS) &&
7402             tp->dev->mtu > ETH_DATA_LEN)
7403                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7404         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7405
7406         /* Initialize the invariants of the rings; we only set this
7407          * stuff once.  This works because the card does not
7408          * write into the rx buffer posting rings.
7409          */
7410         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7411                 struct tg3_rx_buffer_desc *rxd;
7412
7413                 rxd = &tpr->rx_std[i];
7414                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7415                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7416                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7417                                (i << RXD_OPAQUE_INDEX_SHIFT));
7418         }
7419
7420         /* Now allocate fresh SKBs for each rx ring. */
7421         for (i = 0; i < tp->rx_pending; i++) {
7422                 unsigned int frag_size;
7423
7424                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7425                                       &frag_size) < 0) {
7426                         netdev_warn(tp->dev,
7427                                     "Using a smaller RX standard ring. Only "
7428                                     "%d out of %d buffers were allocated "
7429                                     "successfully\n", i, tp->rx_pending);
7430                         if (i == 0)
7431                                 goto initfail;
7432                         tp->rx_pending = i;
7433                         break;
7434                 }
7435         }
7436
7437         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7438                 goto done;
7439
7440         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7441
7442         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7443                 goto done;
7444
7445         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7446                 struct tg3_rx_buffer_desc *rxd;
7447
7448                 rxd = &tpr->rx_jmb[i].std;
7449                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7450                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7451                                   RXD_FLAG_JUMBO;
7452                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7453                        (i << RXD_OPAQUE_INDEX_SHIFT));
7454         }
7455
7456         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7457                 unsigned int frag_size;
7458
7459                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7460                                       &frag_size) < 0) {
7461                         netdev_warn(tp->dev,
7462                                     "Using a smaller RX jumbo ring. Only %d "
7463                                     "out of %d buffers were allocated "
7464                                     "successfully\n", i, tp->rx_jumbo_pending);
7465                         if (i == 0)
7466                                 goto initfail;
7467                         tp->rx_jumbo_pending = i;
7468                         break;
7469                 }
7470         }
7471
7472 done:
7473         return 0;
7474
7475 initfail:
7476         tg3_rx_prodring_free(tp, tpr);
7477         return -ENOMEM;
7478 }
7479
7480 static void tg3_rx_prodring_fini(struct tg3 *tp,
7481                                  struct tg3_rx_prodring_set *tpr)
7482 {
7483         kfree(tpr->rx_std_buffers);
7484         tpr->rx_std_buffers = NULL;
7485         kfree(tpr->rx_jmb_buffers);
7486         tpr->rx_jmb_buffers = NULL;
7487         if (tpr->rx_std) {
7488                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7489                                   tpr->rx_std, tpr->rx_std_mapping);
7490                 tpr->rx_std = NULL;
7491         }
7492         if (tpr->rx_jmb) {
7493                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7494                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7495                 tpr->rx_jmb = NULL;
7496         }
7497 }
7498
7499 static int tg3_rx_prodring_init(struct tg3 *tp,
7500                                 struct tg3_rx_prodring_set *tpr)
7501 {
7502         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7503                                       GFP_KERNEL);
7504         if (!tpr->rx_std_buffers)
7505                 return -ENOMEM;
7506
7507         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7508                                          TG3_RX_STD_RING_BYTES(tp),
7509                                          &tpr->rx_std_mapping,
7510                                          GFP_KERNEL);
7511         if (!tpr->rx_std)
7512                 goto err_out;
7513
7514         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7515                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7516                                               GFP_KERNEL);
7517                 if (!tpr->rx_jmb_buffers)
7518                         goto err_out;
7519
7520                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7521                                                  TG3_RX_JMB_RING_BYTES(tp),
7522                                                  &tpr->rx_jmb_mapping,
7523                                                  GFP_KERNEL);
7524                 if (!tpr->rx_jmb)
7525                         goto err_out;
7526         }
7527
7528         return 0;
7529
7530 err_out:
7531         tg3_rx_prodring_fini(tp, tpr);
7532         return -ENOMEM;
7533 }
7534
7535 /* Free up pending packets in all rx/tx rings.
7536  *
7537  * The chip has been shut down and the driver detached from
7538  * the network stack, so no interrupts or new tx packets will
7539  * end up in the driver.  tp->{tx,}lock is not held and we are not
7540  * in an interrupt context and thus may sleep.
7541  */
7542 static void tg3_free_rings(struct tg3 *tp)
7543 {
7544         int i, j;
7545
7546         for (j = 0; j < tp->irq_cnt; j++) {
7547                 struct tg3_napi *tnapi = &tp->napi[j];
7548
7549                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7550
7551                 if (!tnapi->tx_buffers)
7552                         continue;
7553
7554                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7555                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7556
7557                         if (!skb)
7558                                 continue;
7559
7560                         tg3_tx_skb_unmap(tnapi, i,
7561                                          skb_shinfo(skb)->nr_frags - 1);
7562
7563                         dev_kfree_skb_any(skb);
7564                 }
7565                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7566         }
7567 }
7568
7569 /* Initialize tx/rx rings for packet processing.
7570  *
7571  * The chip has been shut down and the driver detached from
7572  * the network stack, so no interrupts or new tx packets will
7573  * end up in the driver.  tp->{tx,}lock are held and thus
7574  * we may not sleep.
7575  */
7576 static int tg3_init_rings(struct tg3 *tp)
7577 {
7578         int i;
7579
7580         /* Free up all the SKBs. */
7581         tg3_free_rings(tp);
7582
7583         for (i = 0; i < tp->irq_cnt; i++) {
7584                 struct tg3_napi *tnapi = &tp->napi[i];
7585
7586                 tnapi->last_tag = 0;
7587                 tnapi->last_irq_tag = 0;
7588                 tnapi->hw_status->status = 0;
7589                 tnapi->hw_status->status_tag = 0;
7590                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7591
7592                 tnapi->tx_prod = 0;
7593                 tnapi->tx_cons = 0;
7594                 if (tnapi->tx_ring)
7595                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7596
7597                 tnapi->rx_rcb_ptr = 0;
7598                 if (tnapi->rx_rcb)
7599                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7600
7601                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7602                         tg3_free_rings(tp);
7603                         return -ENOMEM;
7604                 }
7605         }
7606
7607         return 0;
7608 }
7609
7610 /*
7611  * Must be invoked with interrupt sources disabled and
7612  * the hardware shut down.
7613  */
7614 static void tg3_free_consistent(struct tg3 *tp)
7615 {
7616         int i;
7617
7618         for (i = 0; i < tp->irq_cnt; i++) {
7619                 struct tg3_napi *tnapi = &tp->napi[i];
7620
7621                 if (tnapi->tx_ring) {
7622                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7623                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7624                         tnapi->tx_ring = NULL;
7625                 }
7626
7627                 kfree(tnapi->tx_buffers);
7628                 tnapi->tx_buffers = NULL;
7629
7630                 if (tnapi->rx_rcb) {
7631                         dma_free_coherent(&tp->pdev->dev,
7632                                           TG3_RX_RCB_RING_BYTES(tp),
7633                                           tnapi->rx_rcb,
7634                                           tnapi->rx_rcb_mapping);
7635                         tnapi->rx_rcb = NULL;
7636                 }
7637
7638                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7639
7640                 if (tnapi->hw_status) {
7641                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7642                                           tnapi->hw_status,
7643                                           tnapi->status_mapping);
7644                         tnapi->hw_status = NULL;
7645                 }
7646         }
7647
7648         if (tp->hw_stats) {
7649                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7650                                   tp->hw_stats, tp->stats_mapping);
7651                 tp->hw_stats = NULL;
7652         }
7653 }
7654
7655 /*
7656  * Must be invoked with interrupt sources disabled and
7657  * the hardware shut down.  Can sleep.
7658  */
7659 static int tg3_alloc_consistent(struct tg3 *tp)
7660 {
7661         int i;
7662
7663         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7664                                           sizeof(struct tg3_hw_stats),
7665                                           &tp->stats_mapping,
7666                                           GFP_KERNEL);
7667         if (!tp->hw_stats)
7668                 goto err_out;
7669
7670         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7671
7672         for (i = 0; i < tp->irq_cnt; i++) {
7673                 struct tg3_napi *tnapi = &tp->napi[i];
7674                 struct tg3_hw_status *sblk;
7675
7676                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7677                                                       TG3_HW_STATUS_SIZE,
7678                                                       &tnapi->status_mapping,
7679                                                       GFP_KERNEL);
7680                 if (!tnapi->hw_status)
7681                         goto err_out;
7682
7683                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7684                 sblk = tnapi->hw_status;
7685
7686                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7687                         goto err_out;
7688
7689                 /* If multivector TSS is enabled, vector 0 does not handle
7690                  * tx interrupts.  Don't allocate any resources for it.
7691                  */
7692                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7693                     (i && tg3_flag(tp, ENABLE_TSS))) {
7694                         tnapi->tx_buffers = kzalloc(
7695                                                sizeof(struct tg3_tx_ring_info) *
7696                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7697                         if (!tnapi->tx_buffers)
7698                                 goto err_out;
7699
7700                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7701                                                             TG3_TX_RING_BYTES,
7702                                                         &tnapi->tx_desc_mapping,
7703                                                             GFP_KERNEL);
7704                         if (!tnapi->tx_ring)
7705                                 goto err_out;
7706                 }
7707
7708                 /*
7709                  * When RSS is enabled, the status block format changes
7710                  * slightly.  The "rx_jumbo_consumer", "reserved",
7711                  * and "rx_mini_consumer" members get mapped to the
7712                  * other three rx return ring producer indexes.
7713                  */
7714                 switch (i) {
7715                 default:
7716                         if (tg3_flag(tp, ENABLE_RSS)) {
7717                                 tnapi->rx_rcb_prod_idx = NULL;
7718                                 break;
7719                         }
7720                         /* Fall through */
7721                 case 1:
7722                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7723                         break;
7724                 case 2:
7725                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7726                         break;
7727                 case 3:
7728                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7729                         break;
7730                 case 4:
7731                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7732                         break;
7733                 }
7734
7735                 /*
7736                  * If multivector RSS is enabled, vector 0 does not handle
7737                  * rx or tx interrupts.  Don't allocate any resources for it.
7738                  */
7739                 if (!i && tg3_flag(tp, ENABLE_RSS))
7740                         continue;
7741
7742                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7743                                                    TG3_RX_RCB_RING_BYTES(tp),
7744                                                    &tnapi->rx_rcb_mapping,
7745                                                    GFP_KERNEL);
7746                 if (!tnapi->rx_rcb)
7747                         goto err_out;
7748
7749                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7750         }
7751
7752         return 0;
7753
7754 err_out:
7755         tg3_free_consistent(tp);
7756         return -ENOMEM;
7757 }
7758
7759 #define MAX_WAIT_CNT 1000
7760
7761 /* To stop a block, clear the enable bit and poll until it
7762  * clears.  tp->lock is held.
7763  */
7764 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7765 {
7766         unsigned int i;
7767         u32 val;
7768
7769         if (tg3_flag(tp, 5705_PLUS)) {
7770                 switch (ofs) {
7771                 case RCVLSC_MODE:
7772                 case DMAC_MODE:
7773                 case MBFREE_MODE:
7774                 case BUFMGR_MODE:
7775                 case MEMARB_MODE:
7776                         /* We can't enable/disable these bits on the
7777                          * 5705/5750, so just report success.
7778                          */
7779                         return 0;
7780
7781                 default:
7782                         break;
7783                 }
7784         }
7785
7786         val = tr32(ofs);
7787         val &= ~enable_bit;
7788         tw32_f(ofs, val);
7789
7790         for (i = 0; i < MAX_WAIT_CNT; i++) {
7791                 udelay(100);
7792                 val = tr32(ofs);
7793                 if ((val & enable_bit) == 0)
7794                         break;
7795         }
7796
7797         if (i == MAX_WAIT_CNT && !silent) {
7798                 dev_err(&tp->pdev->dev,
7799                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7800                         ofs, enable_bit);
7801                 return -ENODEV;
7802         }
7803
7804         return 0;
7805 }
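/* Worst case, the poll loop above waits
 *
 *	MAX_WAIT_CNT * 100us = 1000 * 100us = 100ms
 *
 * per block before the stop attempt is reported as failed with -ENODEV.
 */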
7806
7807 /* tp->lock is held. */
7808 static int tg3_abort_hw(struct tg3 *tp, int silent)
7809 {
7810         int i, err;
7811
7812         tg3_disable_ints(tp);
7813
7814         tp->rx_mode &= ~RX_MODE_ENABLE;
7815         tw32_f(MAC_RX_MODE, tp->rx_mode);
7816         udelay(10);
7817
7818         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7819         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7820         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7821         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7822         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7823         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7824
7825         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7826         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7827         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7828         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7829         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7830         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7831         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7832
7833         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7834         tw32_f(MAC_MODE, tp->mac_mode);
7835         udelay(40);
7836
7837         tp->tx_mode &= ~TX_MODE_ENABLE;
7838         tw32_f(MAC_TX_MODE, tp->tx_mode);
7839
7840         for (i = 0; i < MAX_WAIT_CNT; i++) {
7841                 udelay(100);
7842                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7843                         break;
7844         }
7845         if (i >= MAX_WAIT_CNT) {
7846                 dev_err(&tp->pdev->dev,
7847                         "%s timed out, TX_MODE_ENABLE will not clear "
7848                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7849                 err |= -ENODEV;
7850         }
7851
7852         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7853         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7854         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7855
7856         tw32(FTQ_RESET, 0xffffffff);
7857         tw32(FTQ_RESET, 0x00000000);
7858
7859         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7860         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7861
7862         for (i = 0; i < tp->irq_cnt; i++) {
7863                 struct tg3_napi *tnapi = &tp->napi[i];
7864                 if (tnapi->hw_status)
7865                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7866         }
7867
7868         return err;
7869 }
7870
7871 /* Save PCI command register before chip reset */
7872 static void tg3_save_pci_state(struct tg3 *tp)
7873 {
7874         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7875 }
7876
7877 /* Restore PCI state after chip reset */
7878 static void tg3_restore_pci_state(struct tg3 *tp)
7879 {
7880         u32 val;
7881
7882         /* Re-enable indirect register accesses. */
7883         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7884                                tp->misc_host_ctrl);
7885
7886         /* Set MAX PCI retry to zero. */
7887         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7888         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7889             tg3_flag(tp, PCIX_MODE))
7890                 val |= PCISTATE_RETRY_SAME_DMA;
7891         /* Allow reads and writes to the APE register and memory space. */
7892         if (tg3_flag(tp, ENABLE_APE))
7893                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7894                        PCISTATE_ALLOW_APE_SHMEM_WR |
7895                        PCISTATE_ALLOW_APE_PSPACE_WR;
7896         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7897
7898         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7899
7900         if (!tg3_flag(tp, PCI_EXPRESS)) {
7901                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7902                                       tp->pci_cacheline_sz);
7903                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7904                                       tp->pci_lat_timer);
7905         }
7906
7907         /* Make sure PCI-X relaxed ordering bit is clear. */
7908         if (tg3_flag(tp, PCIX_MODE)) {
7909                 u16 pcix_cmd;
7910
7911                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7912                                      &pcix_cmd);
7913                 pcix_cmd &= ~PCI_X_CMD_ERO;
7914                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7915                                       pcix_cmd);
7916         }
7917
7918         if (tg3_flag(tp, 5780_CLASS)) {
7919
7920                 /* A chip reset on the 5780 will clear the MSI enable
7921                  * bit, so we need to restore it.
7922                  */
7923                 if (tg3_flag(tp, USING_MSI)) {
7924                         u16 ctrl;
7925
7926                         pci_read_config_word(tp->pdev,
7927                                              tp->msi_cap + PCI_MSI_FLAGS,
7928                                              &ctrl);
7929                         pci_write_config_word(tp->pdev,
7930                                               tp->msi_cap + PCI_MSI_FLAGS,
7931                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7932                         val = tr32(MSGINT_MODE);
7933                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7934                 }
7935         }
7936 }
7937
7938 /* tp->lock is held. */
7939 static int tg3_chip_reset(struct tg3 *tp)
7940 {
7941         u32 val;
7942         void (*write_op)(struct tg3 *, u32, u32);
7943         int i, err;
7944
7945         tg3_nvram_lock(tp);
7946
7947         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7948
7949         /* There is no matching tg3_nvram_unlock() after this because
7950          * the chip reset below will undo the nvram lock.
7951          */
7952         tp->nvram_lock_cnt = 0;
7953
7954         /* GRC_MISC_CFG core clock reset will clear the memory
7955          * enable bit in PCI register 4 and the MSI enable bit
7956          * on some chips, so we save relevant registers here.
7957          */
7958         tg3_save_pci_state(tp);
7959
7960         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7961             tg3_flag(tp, 5755_PLUS))
7962                 tw32(GRC_FASTBOOT_PC, 0);
7963
7964         /*
7965          * We must avoid the readl() that normally takes place.
7966          * It can lock up machines, cause machine checks, and do other
7967          * fun things.  So, temporarily disable the 5701
7968          * hardware workaround, while we do the reset.
7969          */
7970         write_op = tp->write32;
7971         if (write_op == tg3_write_flush_reg32)
7972                 tp->write32 = tg3_write32;
7973
7974         /* Prevent the irq handler from reading or writing PCI registers
7975          * during chip reset when the memory enable bit in the PCI command
7976          * register may be cleared.  The chip does not generate interrupts
7977          * at this time, but the irq handler may still be called due to irq
7978          * sharing or irqpoll.
7979          */
7980         tg3_flag_set(tp, CHIP_RESETTING);
7981         for (i = 0; i < tp->irq_cnt; i++) {
7982                 struct tg3_napi *tnapi = &tp->napi[i];
7983                 if (tnapi->hw_status) {
7984                         tnapi->hw_status->status = 0;
7985                         tnapi->hw_status->status_tag = 0;
7986                 }
7987                 tnapi->last_tag = 0;
7988                 tnapi->last_irq_tag = 0;
7989         }
7990         smp_mb();
7991
7992         for (i = 0; i < tp->irq_cnt; i++)
7993                 synchronize_irq(tp->napi[i].irq_vec);
7994
7995         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7996                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7997                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7998         }
7999
8000         /* do the reset */
8001         val = GRC_MISC_CFG_CORECLK_RESET;
8002
8003         if (tg3_flag(tp, PCI_EXPRESS)) {
8004                 /* Force PCIe 1.0a mode */
8005                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8006                     !tg3_flag(tp, 57765_PLUS) &&
8007                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8008                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8009                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8010
8011                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8012                         tw32(GRC_MISC_CFG, (1 << 29));
8013                         val |= (1 << 29);
8014                 }
8015         }
8016
8017         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8018                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8019                 tw32(GRC_VCPU_EXT_CTRL,
8020                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8021         }
8022
8023         /* Manage gphy power for all CPMU absent PCIe devices. */
8024         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8025                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8026
8027         tw32(GRC_MISC_CFG, val);
8028
8029         /* restore 5701 hardware bug workaround write method */
8030         tp->write32 = write_op;
8031
8032         /* Unfortunately, we have to delay before the PCI read back.
8033          * Some 575X chips will not even respond to a PCI cfg access
8034          * when the reset command is given to the chip.
8035          *
8036          * How do these hardware designers expect things to work
8037          * properly if the PCI write is posted for a long period
8038          * of time?  It is always necessary to have some method by
8039          * which a register read-back can occur to push out the
8040          * write that does the reset.
8041          *
8042          * For most tg3 variants the trick below has worked.
8043          * Ho hum...
8044          */
8045         udelay(120);
8046
8047         /* Flush PCI posted writes.  The normal MMIO registers
8048          * are inaccessible at this time, so this is the only
8049          * way to do this reliably (actually, this is no longer
8050          * the case, see above).  I tried to use indirect
8051          * register read/write but this upset some 5701 variants.
8052          */
8053         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8054
8055         udelay(120);
8056
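        /* The config-space read above is the same posted-write flush
         * idiom the tw32_f() helper applies to normal MMIO registers.
         * A minimal sketch (illustrative only):
         *
         *	tw32(reg, val);	// write may sit in a posting buffer
         *	tr32(reg);	// read-back forces the write to complete
         */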
8057         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
8058                 u16 val16;
8059
8060                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8061                         int i;
8062                         u32 cfg_val;
8063
8064                         /* Wait for link training to complete.  */
8065                         for (i = 0; i < 5000; i++)
8066                                 udelay(100);
8067
8068                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8069                         pci_write_config_dword(tp->pdev, 0xc4,
8070                                                cfg_val | (1 << 15));
8071                 }
8072
8073                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8074                 pci_read_config_word(tp->pdev,
8075                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8076                                      &val16);
8077                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
8078                            PCI_EXP_DEVCTL_NOSNOOP_EN);
8079                 /*
8080                  * Older PCIe devices only support the 128-byte
8081                  * MPS setting.  Enforce the restriction.
8082                  */
8083                 if (!tg3_flag(tp, CPMU_PRESENT))
8084                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
8085                 pci_write_config_word(tp->pdev,
8086                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8087                                       val16);
8088
8089                 /* Clear error status */
8090                 pci_write_config_word(tp->pdev,
8091                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
8092                                       PCI_EXP_DEVSTA_CED |
8093                                       PCI_EXP_DEVSTA_NFED |
8094                                       PCI_EXP_DEVSTA_FED |
8095                                       PCI_EXP_DEVSTA_URD);
8096         }
8097
8098         tg3_restore_pci_state(tp);
8099
8100         tg3_flag_clear(tp, CHIP_RESETTING);
8101         tg3_flag_clear(tp, ERROR_PROCESSED);
8102
8103         val = 0;
8104         if (tg3_flag(tp, 5780_CLASS))
8105                 val = tr32(MEMARB_MODE);
8106         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8107
8108         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8109                 tg3_stop_fw(tp);
8110                 tw32(0x5000, 0x400);
8111         }
8112
8113         tw32(GRC_MODE, tp->grc_mode);
8114
8115         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8116                 val = tr32(0xc4);
8117
8118                 tw32(0xc4, val | (1 << 15));
8119         }
8120
8121         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8122             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8123                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8124                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8125                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8126                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8127         }
8128
8129         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8130                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8131                 val = tp->mac_mode;
8132         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8133                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8134                 val = tp->mac_mode;
8135         } else
8136                 val = 0;
8137
8138         tw32_f(MAC_MODE, val);
8139         udelay(40);
8140
8141         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8142
8143         err = tg3_poll_fw(tp);
8144         if (err)
8145                 return err;
8146
8147         tg3_mdio_start(tp);
8148
8149         if (tg3_flag(tp, PCI_EXPRESS) &&
8150             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8151             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8152             !tg3_flag(tp, 57765_PLUS)) {
8153                 val = tr32(0x7c00);
8154
8155                 tw32(0x7c00, val | (1 << 25));
8156         }
8157
8158         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8159                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8160                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8161         }
8162
8163         /* Reprobe ASF enable state.  */
8164         tg3_flag_clear(tp, ENABLE_ASF);
8165         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8166         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8167         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8168                 u32 nic_cfg;
8169
8170                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8171                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8172                         tg3_flag_set(tp, ENABLE_ASF);
8173                         tp->last_event_jiffies = jiffies;
8174                         if (tg3_flag(tp, 5750_PLUS))
8175                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8176                 }
8177         }
8178
8179         return 0;
8180 }
8181
8182 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8183 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8184
8185 /* tp->lock is held. */
8186 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8187 {
8188         int err;
8189
8190         tg3_stop_fw(tp);
8191
8192         tg3_write_sig_pre_reset(tp, kind);
8193
8194         tg3_abort_hw(tp, silent);
8195         err = tg3_chip_reset(tp);
8196
8197         __tg3_set_mac_addr(tp, 0);
8198
8199         tg3_write_sig_legacy(tp, kind);
8200         tg3_write_sig_post_reset(tp, kind);
8201
8202         if (tp->hw_stats) {
8203                 /* Save the stats across chip resets... */
8204                 tg3_get_nstats(tp, &tp->net_stats_prev);
8205                 tg3_get_estats(tp, &tp->estats_prev);
8206
8207                 /* And make sure the next sample is new data */
8208                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8209         }
8210
8211         if (err)
8212                 return err;
8213
8214         return 0;
8215 }
8216
8217 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8218 {
8219         struct tg3 *tp = netdev_priv(dev);
8220         struct sockaddr *addr = p;
8221         int err = 0, skip_mac_1 = 0;
8222
8223         if (!is_valid_ether_addr(addr->sa_data))
8224                 return -EADDRNOTAVAIL;
8225
8226         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8227
8228         if (!netif_running(dev))
8229                 return 0;
8230
8231         if (tg3_flag(tp, ENABLE_ASF)) {
8232                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8233
8234                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8235                 addr0_low = tr32(MAC_ADDR_0_LOW);
8236                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8237                 addr1_low = tr32(MAC_ADDR_1_LOW);
8238
8239                 /* Skip MAC addr 1 if ASF is using it. */
8240                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8241                     !(addr1_high == 0 && addr1_low == 0))
8242                         skip_mac_1 = 1;
8243         }
8244         spin_lock_bh(&tp->lock);
8245         __tg3_set_mac_addr(tp, skip_mac_1);
8246         spin_unlock_bh(&tp->lock);
8247
8248         return err;
8249 }
8250
8251 /* tp->lock is held. */
8252 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8253                            dma_addr_t mapping, u32 maxlen_flags,
8254                            u32 nic_addr)
8255 {
8256         tg3_write_mem(tp,
8257                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8258                       ((u64) mapping >> 32));
8259         tg3_write_mem(tp,
8260                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8261                       ((u64) mapping & 0xffffffff));
8262         tg3_write_mem(tp,
8263                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8264                        maxlen_flags);
8265
8266         if (!tg3_flag(tp, 5705_PLUS))
8267                 tg3_write_mem(tp,
8268                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8269                               nic_addr);
8270 }
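/* The two writes above split a 64-bit DMA address across a pair of
 * 32-bit SRAM words.  Worked example with a hypothetical mapping of
 * 0x0000000123456789:
 *
 *	(u64) mapping >> 32		== 0x00000001	(HIGH word)
 *	(u64) mapping & 0xffffffff	== 0x23456789	(LOW word)
 */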
8271
8272 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8273 {
8274         int i;
8275
8276         if (!tg3_flag(tp, ENABLE_TSS)) {
8277                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8278                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8279                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8280         } else {
8281                 tw32(HOSTCC_TXCOL_TICKS, 0);
8282                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8283                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8284         }
8285
8286         if (!tg3_flag(tp, ENABLE_RSS)) {
8287                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8288                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8289                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8290         } else {
8291                 tw32(HOSTCC_RXCOL_TICKS, 0);
8292                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8293                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8294         }
8295
8296         if (!tg3_flag(tp, 5705_PLUS)) {
8297                 u32 val = ec->stats_block_coalesce_usecs;
8298
8299                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8300                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8301
8302                 if (!netif_carrier_ok(tp->dev))
8303                         val = 0;
8304
8305                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8306         }
8307
8308         for (i = 0; i < tp->irq_cnt - 1; i++) {
8309                 u32 reg;
8310
8311                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8312                 tw32(reg, ec->rx_coalesce_usecs);
8313                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8314                 tw32(reg, ec->rx_max_coalesced_frames);
8315                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8316                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8317
8318                 if (tg3_flag(tp, ENABLE_TSS)) {
8319                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8320                         tw32(reg, ec->tx_coalesce_usecs);
8321                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8322                         tw32(reg, ec->tx_max_coalesced_frames);
8323                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8324                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8325                 }
8326         }
8327
8328         for (; i < tp->irq_max - 1; i++) {
8329                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8330                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8331                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8332
8333                 if (tg3_flag(tp, ENABLE_TSS)) {
8334                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8335                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8336                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8337                 }
8338         }
8339 }
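/* The per-vector coalescing registers above sit at a fixed 0x18-byte
 * stride from the VEC1 base, so for interrupt vector n (n >= 1) the rx
 * ticks register would be computed as:
 *
 *	reg = HOSTCC_RXCOL_TICKS_VEC1 + (n - 1) * 0x18;
 */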
8340
8341 /* tp->lock is held. */
8342 static void tg3_rings_reset(struct tg3 *tp)
8343 {
8344         int i;
8345         u32 stblk, txrcb, rxrcb, limit;
8346         struct tg3_napi *tnapi = &tp->napi[0];
8347
8348         /* Disable all transmit rings but the first. */
8349         if (!tg3_flag(tp, 5705_PLUS))
8350                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8351         else if (tg3_flag(tp, 5717_PLUS))
8352                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8353         else if (tg3_flag(tp, 57765_CLASS))
8354                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8355         else
8356                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8357
8358         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8359              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8360                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8361                               BDINFO_FLAGS_DISABLED);
8362
8363
8364         /* Disable all receive return rings but the first. */
8365         if (tg3_flag(tp, 5717_PLUS))
8366                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8367         else if (!tg3_flag(tp, 5705_PLUS))
8368                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8369         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8370                  tg3_flag(tp, 57765_CLASS))
8371                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8372         else
8373                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8374
8375         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8376              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8377                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8378                               BDINFO_FLAGS_DISABLED);
8379
8380         /* Disable interrupts */
8381         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8382         tp->napi[0].chk_msi_cnt = 0;
8383         tp->napi[0].last_rx_cons = 0;
8384         tp->napi[0].last_tx_cons = 0;
8385
8386         /* Zero mailbox registers. */
8387         if (tg3_flag(tp, SUPPORT_MSIX)) {
8388                 for (i = 1; i < tp->irq_max; i++) {
8389                         tp->napi[i].tx_prod = 0;
8390                         tp->napi[i].tx_cons = 0;
8391                         if (tg3_flag(tp, ENABLE_TSS))
8392                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8393                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8394                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8395                         tp->napi[i].chk_msi_cnt = 0;
8396                         tp->napi[i].last_rx_cons = 0;
8397                         tp->napi[i].last_tx_cons = 0;
8398                 }
8399                 if (!tg3_flag(tp, ENABLE_TSS))
8400                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8401         } else {
8402                 tp->napi[0].tx_prod = 0;
8403                 tp->napi[0].tx_cons = 0;
8404                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8405                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8406         }
8407
8408         /* Make sure the NIC-based send BD rings are disabled. */
8409         if (!tg3_flag(tp, 5705_PLUS)) {
8410                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8411                 for (i = 0; i < 16; i++)
8412                         tw32_tx_mbox(mbox + i * 8, 0);
8413         }
8414
8415         txrcb = NIC_SRAM_SEND_RCB;
8416         rxrcb = NIC_SRAM_RCV_RET_RCB;
8417
8418         /* Clear status block in ram. */
8419         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8420
8421         /* Set status block DMA address */
8422         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8423              ((u64) tnapi->status_mapping >> 32));
8424         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8425              ((u64) tnapi->status_mapping & 0xffffffff));
8426
8427         if (tnapi->tx_ring) {
8428                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8429                                (TG3_TX_RING_SIZE <<
8430                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8431                                NIC_SRAM_TX_BUFFER_DESC);
8432                 txrcb += TG3_BDINFO_SIZE;
8433         }
8434
8435         if (tnapi->rx_rcb) {
8436                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8437                                (tp->rx_ret_ring_mask + 1) <<
8438                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8439                 rxrcb += TG3_BDINFO_SIZE;
8440         }
8441
8442         stblk = HOSTCC_STATBLCK_RING1;
8443
8444         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8445                 u64 mapping = (u64)tnapi->status_mapping;
8446                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8447                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8448
8449                 /* Clear status block in ram. */
8450                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8451
8452                 if (tnapi->tx_ring) {
8453                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8454                                        (TG3_TX_RING_SIZE <<
8455                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8456                                        NIC_SRAM_TX_BUFFER_DESC);
8457                         txrcb += TG3_BDINFO_SIZE;
8458                 }
8459
8460                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8461                                ((tp->rx_ret_ring_mask + 1) <<
8462                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8463
8464                 stblk += 8;
8465                 rxrcb += TG3_BDINFO_SIZE;
8466         }
8467 }
8468
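/* Program the RX buffer descriptor replenish thresholds.  The NIC-side
 * threshold is bounded by the per-chip BD cache size; the host-side
 * threshold scales with the configured ring size.
 */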
8469 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8470 {
8471         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8472
8473         if (!tg3_flag(tp, 5750_PLUS) ||
8474             tg3_flag(tp, 5780_CLASS) ||
8475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8476             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8477             tg3_flag(tp, 57765_PLUS))
8478                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8479         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8480                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8481                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8482         else
8483                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8484
8485         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8486         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8487
8488         val = min(nic_rep_thresh, host_rep_thresh);
8489         tw32(RCVBDI_STD_THRESH, val);
8490
8491         if (tg3_flag(tp, 57765_PLUS))
8492                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8493
8494         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8495                 return;
8496
8497         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8498
8499         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8500
8501         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8502         tw32(RCVBDI_JUMBO_THRESH, val);
8503
8504         if (tg3_flag(tp, 57765_PLUS))
8505                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8506 }
8507
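/* Bitwise CRC-32 (reflected Ethernet polynomial 0xedb88320), used to
 * hash multicast addresses into the MAC hash filter registers.
 */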
8508 static inline u32 calc_crc(unsigned char *buf, int len)
8509 {
8510         u32 reg;
8511         u32 tmp;
8512         int j, k;
8513
8514         reg = 0xffffffff;
8515
8516         for (j = 0; j < len; j++) {
8517                 reg ^= buf[j];
8518
8519                 for (k = 0; k < 8; k++) {
8520                         tmp = reg & 0x01;
8521
8522                         reg >>= 1;
8523
8524                         if (tmp)
8525                                 reg ^= 0xedb88320;
8526                 }
8527         }
8528
8529         return ~reg;
8530 }
8531
8532 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8533 {
8534         /* accept or reject all multicast frames */
8535         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8536         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8537         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8538         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8539 }
8540
8541 static void __tg3_set_rx_mode(struct net_device *dev)
8542 {
8543         struct tg3 *tp = netdev_priv(dev);
8544         u32 rx_mode;
8545
8546         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8547                                   RX_MODE_KEEP_VLAN_TAG);
8548
8549 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8550         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8551          * flag clear.
8552          */
8553         if (!tg3_flag(tp, ENABLE_ASF))
8554                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8555 #endif
8556
8557         if (dev->flags & IFF_PROMISC) {
8558                 /* Promiscuous mode. */
8559                 rx_mode |= RX_MODE_PROMISC;
8560         } else if (dev->flags & IFF_ALLMULTI) {
8561                 /* Accept all multicast. */
8562                 tg3_set_multi(tp, 1);
8563         } else if (netdev_mc_empty(dev)) {
8564                 /* Reject all multicast. */
8565                 tg3_set_multi(tp, 0);
8566         } else {
8567                 /* Accept one or more multicast(s). */
8568                 struct netdev_hw_addr *ha;
8569                 u32 mc_filter[4] = { 0, };
8570                 u32 regidx;
8571                 u32 bit;
8572                 u32 crc;
8573
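                /* The low 7 bits of the inverted CRC select one of 128
                 * filter bits; the top two of those pick the hash
                 * register, the remaining five the bit within it.
                 */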
8574                 netdev_for_each_mc_addr(ha, dev) {
8575                         crc = calc_crc(ha->addr, ETH_ALEN);
8576                         bit = ~crc & 0x7f;
8577                         regidx = (bit & 0x60) >> 5;
8578                         bit &= 0x1f;
8579                         mc_filter[regidx] |= (1 << bit);
8580                 }
8581
8582                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8583                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8584                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8585                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8586         }
8587
8588         if (rx_mode != tp->rx_mode) {
8589                 tp->rx_mode = rx_mode;
8590                 tw32_f(MAC_RX_MODE, rx_mode);
8591                 udelay(10);
8592         }
8593 }
8594
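/* Fill the RSS indirection table with the ethtool default spread
 * across the irq_cnt - 1 RX rings.
 */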
8595 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8596 {
8597         int i;
8598
8599         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8600                 tp->rss_ind_tbl[i] =
8601                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8602 }
8603
8604 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8605 {
8606         int i;
8607
8608         if (!tg3_flag(tp, SUPPORT_MSIX))
8609                 return;
8610
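        /* With at most one RX ring there is nothing to spread;
         * clear the table.
         */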
8611         if (tp->irq_cnt <= 2) {
8612                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8613                 return;
8614         }
8615
8616         /* Validate table against current IRQ count */
8617         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8618                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8619                         break;
8620         }
8621
8622         if (i != TG3_RSS_INDIR_TBL_SIZE)
8623                 tg3_rss_init_dflt_indir_tbl(tp);
8624 }
8625
8626 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8627 {
8628         int i = 0;
8629         u32 reg = MAC_RSS_INDIR_TBL_0;
8630
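        /* Pack eight 4-bit indirection entries into each 32-bit
         * register, most significant nibble first.
         */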
8631         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8632                 u32 val = tp->rss_ind_tbl[i];
8633                 i++;
8634                 for (; i % 8; i++) {
8635                         val <<= 4;
8636                         val |= tp->rss_ind_tbl[i];
8637                 }
8638                 tw32(reg, val);
8639                 reg += 4;
8640         }
8641 }
8642
8643 /* tp->lock is held. */
8644 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8645 {
8646         u32 val, rdmac_mode;
8647         int i, err, limit;
8648         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8649
8650         tg3_disable_ints(tp);
8651
8652         tg3_stop_fw(tp);
8653
8654         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8655
8656         if (tg3_flag(tp, INIT_COMPLETE))
8657                 tg3_abort_hw(tp, 1);
8658
8659         /* Enable MAC control of LPI */
8660         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8661                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8662                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8663                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8664
8665                 tw32_f(TG3_CPMU_EEE_CTRL,
8666                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8667
8668                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8669                       TG3_CPMU_EEEMD_LPI_IN_TX |
8670                       TG3_CPMU_EEEMD_LPI_IN_RX |
8671                       TG3_CPMU_EEEMD_EEE_ENABLE;
8672
8673                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8674                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8675
8676                 if (tg3_flag(tp, ENABLE_APE))
8677                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8678
8679                 tw32_f(TG3_CPMU_EEE_MODE, val);
8680
8681                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8682                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8683                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8684
8685                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8686                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8687                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8688         }
8689
8690         if (reset_phy)
8691                 tg3_phy_reset(tp);
8692
8693         err = tg3_chip_reset(tp);
8694         if (err)
8695                 return err;
8696
8697         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8698
8699         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8700                 val = tr32(TG3_CPMU_CTRL);
8701                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8702                 tw32(TG3_CPMU_CTRL, val);
8703
8704                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8705                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8706                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8707                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8708
8709                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8710                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8711                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8712                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8713
8714                 val = tr32(TG3_CPMU_HST_ACC);
8715                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8716                 val |= CPMU_HST_ACC_MACCLK_6_25;
8717                 tw32(TG3_CPMU_HST_ACC, val);
8718         }
8719
8720         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8721                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8722                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8723                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8724                 tw32(PCIE_PWR_MGMT_THRESH, val);
8725
8726                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8727                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8728
8729                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8730
8731                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8732                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8733         }
8734
8735         if (tg3_flag(tp, L1PLLPD_EN)) {
8736                 u32 grc_mode = tr32(GRC_MODE);
8737
8738                 /* Access the lower 1K of PL PCIE block registers. */
8739                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8740                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8741
8742                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8743                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8744                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8745
8746                 tw32(GRC_MODE, grc_mode);
8747         }
8748
8749         if (tg3_flag(tp, 57765_CLASS)) {
8750                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8751                         u32 grc_mode = tr32(GRC_MODE);
8752
8753                         /* Access the lower 1K of PL PCIE block registers. */
8754                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8755                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8756
8757                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8758                                    TG3_PCIE_PL_LO_PHYCTL5);
8759                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8760                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8761
8762                         tw32(GRC_MODE, grc_mode);
8763                 }
8764
8765                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8766                         u32 grc_mode = tr32(GRC_MODE);
8767
8768                         /* Access the lower 1K of DL PCIE block registers. */
8769                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8770                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8771
8772                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8773                                    TG3_PCIE_DL_LO_FTSMAX);
8774                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8775                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8776                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8777
8778                         tw32(GRC_MODE, grc_mode);
8779                 }
8780
8781                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8782                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8783                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8784                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8785         }
8786
8787         /* This works around an issue with Athlon chipsets on
8788          * B3 tigon3 silicon.  This bit has no effect on any
8789          * other revision.  But do not set this on PCI Express
8790          * chips and don't even touch the clocks if the CPMU is present.
8791          */
8792         if (!tg3_flag(tp, CPMU_PRESENT)) {
8793                 if (!tg3_flag(tp, PCI_EXPRESS))
8794                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8795                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8796         }
8797
8798         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8799             tg3_flag(tp, PCIX_MODE)) {
8800                 val = tr32(TG3PCI_PCISTATE);
8801                 val |= PCISTATE_RETRY_SAME_DMA;
8802                 tw32(TG3PCI_PCISTATE, val);
8803         }
8804
8805         if (tg3_flag(tp, ENABLE_APE)) {
8806                 /* Allow reads and writes to the
8807                  * APE register and memory space.
8808                  */
8809                 val = tr32(TG3PCI_PCISTATE);
8810                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8811                        PCISTATE_ALLOW_APE_SHMEM_WR |
8812                        PCISTATE_ALLOW_APE_PSPACE_WR;
8813                 tw32(TG3PCI_PCISTATE, val);
8814         }
8815
8816         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8817                 /* Enable some hw fixes.  */
8818                 val = tr32(TG3PCI_MSI_DATA);
8819                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8820                 tw32(TG3PCI_MSI_DATA, val);
8821         }
8822
8823         /* Descriptor ring init may access the
8824          * NIC SRAM area to set up the TX descriptors, so we
8825          * can only do this after the hardware has been
8826          * successfully reset.
8827          */
8828         err = tg3_init_rings(tp);
8829         if (err)
8830                 return err;
8831
8832         if (tg3_flag(tp, 57765_PLUS)) {
8833                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8834                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8835                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8836                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8837                 if (!tg3_flag(tp, 57765_CLASS) &&
8838                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8839                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8840                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8841         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8842                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8843                 /* This value is determined during the probe-time DMA
8844                  * engine test, tg3_test_dma.
8845                  */
8846                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8847         }
8848
8849         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8850                           GRC_MODE_4X_NIC_SEND_RINGS |
8851                           GRC_MODE_NO_TX_PHDR_CSUM |
8852                           GRC_MODE_NO_RX_PHDR_CSUM);
8853         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8854
8855         /* Pseudo-header checksum is done by hardware logic and not
8856          * the offload processors, so make the chip do the pseudo-
8857          * header checksums on receive.  For transmit it is more
8858          * convenient to do the pseudo-header checksum in software
8859          * as Linux does that on transmit for us in all cases.
8860          */
8861         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8862
8863         tw32(GRC_MODE,
8864              tp->grc_mode |
8865              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8866
8867         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8868         val = tr32(GRC_MISC_CFG);
8869         val &= ~0xff;
8870         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8871         tw32(GRC_MISC_CFG, val);
8872
8873         /* Initialize MBUF/DESC pool. */
8874         if (tg3_flag(tp, 5750_PLUS)) {
8875                 /* Do nothing.  */
8876         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8877                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8878                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8879                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8880                 else
8881                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8882                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8883                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8884         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8885                 int fw_len;
8886
8887                 fw_len = tp->fw_len;
8888                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8889                 tw32(BUFMGR_MB_POOL_ADDR,
8890                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8891                 tw32(BUFMGR_MB_POOL_SIZE,
8892                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8893         }
8894
8895         if (tp->dev->mtu <= ETH_DATA_LEN) {
8896                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8897                      tp->bufmgr_config.mbuf_read_dma_low_water);
8898                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8899                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8900                 tw32(BUFMGR_MB_HIGH_WATER,
8901                      tp->bufmgr_config.mbuf_high_water);
8902         } else {
8903                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8904                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8905                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8906                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8907                 tw32(BUFMGR_MB_HIGH_WATER,
8908                      tp->bufmgr_config.mbuf_high_water_jumbo);
8909         }
8910         tw32(BUFMGR_DMA_LOW_WATER,
8911              tp->bufmgr_config.dma_low_water);
8912         tw32(BUFMGR_DMA_HIGH_WATER,
8913              tp->bufmgr_config.dma_high_water);
8914
8915         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8916         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8917                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8918         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8919             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8920             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8921                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8922         tw32(BUFMGR_MODE, val);
8923         for (i = 0; i < 2000; i++) {
8924                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8925                         break;
8926                 udelay(10);
8927         }
8928         if (i >= 2000) {
8929                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8930                 return -ENODEV;
8931         }
8932
8933         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8934                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8935
8936         tg3_setup_rxbd_thresholds(tp);
8937
8938         /* Initialize TG3_BDINFO's at:
8939          *  RCVDBDI_STD_BD:     standard eth size rx ring
8940          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8941          *  RCVDBDI_MINI_BD:    small frame rx ring (does not work; left disabled)
8942          *
8943          * like so:
8944          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8945          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8946          *                              ring attribute flags
8947          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8948          *
8949          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8950          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8951          *
8952          * The size of each ring is fixed in the firmware, but the location is
8953          * configurable.
8954          */
8955         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8956              ((u64) tpr->rx_std_mapping >> 32));
8957         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8958              ((u64) tpr->rx_std_mapping & 0xffffffff));
8959         if (!tg3_flag(tp, 5717_PLUS))
8960                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8961                      NIC_SRAM_RX_BUFFER_DESC);
8962
8963         /* Disable the mini ring */
8964         if (!tg3_flag(tp, 5705_PLUS))
8965                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8966                      BDINFO_FLAGS_DISABLED);
8967
8968         /* Program the jumbo buffer descriptor ring control
8969          * blocks on those devices that have them.
8970          */
8971         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8972             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8973
8974                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8975                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8976                              ((u64) tpr->rx_jmb_mapping >> 32));
8977                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8978                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8979                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8980                               BDINFO_FLAGS_MAXLEN_SHIFT;
8981                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8982                              val | BDINFO_FLAGS_USE_EXT_RECV);
8983                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8984                             tg3_flag(tp, 57765_CLASS))
8985                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8986                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8987                 } else {
8988                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8989                              BDINFO_FLAGS_DISABLED);
8990                 }
8991
8992                 if (tg3_flag(tp, 57765_PLUS)) {
8993                         val = TG3_RX_STD_RING_SIZE(tp);
8994                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8995                         val |= (TG3_RX_STD_DMA_SZ << 2);
8996                 } else
8997                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8998         } else
8999                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9000
9001         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9002
9003         tpr->rx_std_prod_idx = tp->rx_pending;
9004         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9005
9006         tpr->rx_jmb_prod_idx =
9007                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9008         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9009
9010         tg3_rings_reset(tp);
9011
9012         /* Initialize MAC address and backoff seed. */
9013         __tg3_set_mac_addr(tp, 0);
9014
9015         /* MTU + ethernet header + FCS + optional VLAN tag */
9016         tw32(MAC_RX_MTU_SIZE,
9017              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9018
9019         /* The slot time is changed by tg3_setup_phy if we
9020          * run at gigabit with half duplex.
9021          */
9022         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9023               (6 << TX_LENGTHS_IPG_SHIFT) |
9024               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9025
9026         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9027                 val |= tr32(MAC_TX_LENGTHS) &
9028                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9029                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9030
9031         tw32(MAC_TX_LENGTHS, val);
9032
9033         /* Receive rules. */
9034         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9035         tw32(RCVLPC_CONFIG, 0x0181);
9036
9037         /* Calculate RDMAC_MODE setting early; we need it to determine
9038          * the RCVLPC_STATE_ENABLE mask.
9039          */
9040         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9041                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9042                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9043                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9044                       RDMAC_MODE_LNGREAD_ENAB);
9045
9046         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9047                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9048
9049         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9050             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9051             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9052                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9053                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9054                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9055
9056         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9057             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9058                 if (tg3_flag(tp, TSO_CAPABLE) &&
9059                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9060                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9061                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9062                            !tg3_flag(tp, IS_5788)) {
9063                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9064                 }
9065         }
9066
9067         if (tg3_flag(tp, PCI_EXPRESS))
9068                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9069
9070         if (tg3_flag(tp, HW_TSO_1) ||
9071             tg3_flag(tp, HW_TSO_2) ||
9072             tg3_flag(tp, HW_TSO_3))
9073                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9074
9075         if (tg3_flag(tp, 57765_PLUS) ||
9076             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9077             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9078                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9079
9080         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9081                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9082
9083         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9084             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9085             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9086             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9087             tg3_flag(tp, 57765_PLUS)) {
9088                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9089                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
9090                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9091                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9092                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9093                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9094                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9095                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9096                 }
9097                 tw32(TG3_RDMA_RSRVCTRL_REG,
9098                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9099         }
9100
9101         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9102             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9103                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9104                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9105                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9106                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9107         }
9108
9109         /* Receive/send statistics. */
9110         if (tg3_flag(tp, 5750_PLUS)) {
9111                 val = tr32(RCVLPC_STATS_ENABLE);
9112                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9113                 tw32(RCVLPC_STATS_ENABLE, val);
9114         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9115                    tg3_flag(tp, TSO_CAPABLE)) {
9116                 val = tr32(RCVLPC_STATS_ENABLE);
9117                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9118                 tw32(RCVLPC_STATS_ENABLE, val);
9119         } else {
9120                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9121         }
9122         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9123         tw32(SNDDATAI_STATSENAB, 0xffffff);
9124         tw32(SNDDATAI_STATSCTRL,
9125              (SNDDATAI_SCTRL_ENABLE |
9126               SNDDATAI_SCTRL_FASTUPD));
9127
9128         /* Setup host coalescing engine. */
9129         tw32(HOSTCC_MODE, 0);
9130         for (i = 0; i < 2000; i++) {
9131                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9132                         break;
9133                 udelay(10);
9134         }
9135
9136         __tg3_set_coalesce(tp, &tp->coal);
9137
9138         if (!tg3_flag(tp, 5705_PLUS)) {
9139                 /* Status/statistics block address.  See tg3_timer,
9140                  * the tg3_periodic_fetch_stats call there, and
9141                  * tg3_get_stats to see how this works for 5705/5750 chips.
9142                  */
9143                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9144                      ((u64) tp->stats_mapping >> 32));
9145                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9146                      ((u64) tp->stats_mapping & 0xffffffff));
9147                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9148
9149                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9150
9151                 /* Clear statistics and status block memory areas */
9152                 for (i = NIC_SRAM_STATS_BLK;
9153                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9154                      i += sizeof(u32)) {
9155                         tg3_write_mem(tp, i, 0);
9156                         udelay(40);
9157                 }
9158         }
9159
9160         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9161
9162         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9163         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9164         if (!tg3_flag(tp, 5705_PLUS))
9165                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9166
9167         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9168                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9169                 /* Reset to prevent intermittently losing the first RX packet. */
9170                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9171                 udelay(10);
9172         }
9173
9174         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9175                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9176                         MAC_MODE_FHDE_ENABLE;
9177         if (tg3_flag(tp, ENABLE_APE))
9178                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9179         if (!tg3_flag(tp, 5705_PLUS) &&
9180             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9181             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9182                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9183         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9184         udelay(40);
9185
9186         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9187          * If TG3_FLAG_IS_NIC is zero, we should read the
9188          * register to preserve the GPIO settings for LOMs. The GPIOs,
9189          * whether used as inputs or outputs, are set by boot code after
9190          * reset.
9191          */
9192         if (!tg3_flag(tp, IS_NIC)) {
9193                 u32 gpio_mask;
9194
9195                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9196                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9197                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9198
9199                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9200                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9201                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9202
9203                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9204                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9205
9206                 tp->grc_local_ctrl &= ~gpio_mask;
9207                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9208
9209                 /* GPIO1 must be driven high for EEPROM write protect. */
9210                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9211                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9212                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9213         }
9214         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9215         udelay(100);
9216
9217         if (tg3_flag(tp, USING_MSIX)) {
9218                 val = tr32(MSGINT_MODE);
9219                 val |= MSGINT_MODE_ENABLE;
9220                 if (tp->irq_cnt > 1)
9221                         val |= MSGINT_MODE_MULTIVEC_EN;
9222                 if (!tg3_flag(tp, 1SHOT_MSI))
9223                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9224                 tw32(MSGINT_MODE, val);
9225         }
9226
9227         if (!tg3_flag(tp, 5705_PLUS)) {
9228                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9229                 udelay(40);
9230         }
9231
9232         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9233                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9234                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9235                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9236                WDMAC_MODE_LNGREAD_ENAB);
9237
9238         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9239             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9240                 if (tg3_flag(tp, TSO_CAPABLE) &&
9241                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9242                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9243                         /* nothing */
9244                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9245                            !tg3_flag(tp, IS_5788)) {
9246                         val |= WDMAC_MODE_RX_ACCEL;
9247                 }
9248         }
9249
9250         /* Enable host coalescing bug fix */
9251         if (tg3_flag(tp, 5755_PLUS))
9252                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9253
9254         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9255                 val |= WDMAC_MODE_BURST_ALL_DATA;
9256
9257         tw32_f(WDMAC_MODE, val);
9258         udelay(40);
9259
9260         if (tg3_flag(tp, PCIX_MODE)) {
9261                 u16 pcix_cmd;
9262
9263                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9264                                      &pcix_cmd);
9265                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9266                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9267                         pcix_cmd |= PCI_X_CMD_READ_2K;
9268                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9269                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9270                         pcix_cmd |= PCI_X_CMD_READ_2K;
9271                 }
9272                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9273                                       pcix_cmd);
9274         }
9275
9276         tw32_f(RDMAC_MODE, rdmac_mode);
9277         udelay(40);
9278
9279         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9280                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9281                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9282                                 break;
9283                 }
9284                 if (i < TG3_NUM_RDMA_CHANNELS) {
9285                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9286                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9287                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9288                         tg3_flag_set(tp, 5719_RDMA_BUG);
9289                 }
9290         }
9291
9292         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9293         if (!tg3_flag(tp, 5705_PLUS))
9294                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9295
9296         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9297                 tw32(SNDDATAC_MODE,
9298                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9299         else
9300                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9301
9302         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9303         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9304         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9305         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9306                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9307         tw32(RCVDBDI_MODE, val);
9308         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9309         if (tg3_flag(tp, HW_TSO_1) ||
9310             tg3_flag(tp, HW_TSO_2) ||
9311             tg3_flag(tp, HW_TSO_3))
9312                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9313         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9314         if (tg3_flag(tp, ENABLE_TSS))
9315                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9316         tw32(SNDBDI_MODE, val);
9317         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9318
9319         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9320                 err = tg3_load_5701_a0_firmware_fix(tp);
9321                 if (err)
9322                         return err;
9323         }
9324
9325         if (tg3_flag(tp, TSO_CAPABLE)) {
9326                 err = tg3_load_tso_firmware(tp);
9327                 if (err)
9328                         return err;
9329         }
9330
9331         tp->tx_mode = TX_MODE_ENABLE;
9332
9333         if (tg3_flag(tp, 5755_PLUS) ||
9334             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9335                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9336
9337         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9338                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9339                 tp->tx_mode &= ~val;
9340                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9341         }
9342
9343         tw32_f(MAC_TX_MODE, tp->tx_mode);
9344         udelay(100);
9345
9346         if (tg3_flag(tp, ENABLE_RSS)) {
9347                 tg3_rss_write_indir_tbl(tp);
9348
9349                 /* Set up the "secret" RSS hash key. */
9350                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9351                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9352                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9353                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9354                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9355                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9356                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9357                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9358                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9359                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9360         }
9361
9362         tp->rx_mode = RX_MODE_ENABLE;
9363         if (tg3_flag(tp, 5755_PLUS))
9364                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9365
9366         if (tg3_flag(tp, ENABLE_RSS))
9367                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9368                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9369                                RX_MODE_RSS_IPV6_HASH_EN |
9370                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9371                                RX_MODE_RSS_IPV4_HASH_EN |
9372                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9373
9374         tw32_f(MAC_RX_MODE, tp->rx_mode);
9375         udelay(10);
9376
9377         tw32(MAC_LED_CTRL, tp->led_ctrl);
9378
9379         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9380         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9381                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9382                 udelay(10);
9383         }
9384         tw32_f(MAC_RX_MODE, tp->rx_mode);
9385         udelay(10);
9386
9387         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9388                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9389                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9390                         /* Set drive transmission level to 1.2V, but only
9391                          * if the signal pre-emphasis bit is not set.  */
9392                         val = tr32(MAC_SERDES_CFG);
9393                         val &= 0xfffff000;
9394                         val |= 0x880;
9395                         tw32(MAC_SERDES_CFG, val);
9396                 }
9397                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9398                         tw32(MAC_SERDES_CFG, 0x616000);
9399         }
9400
9401         /* Prevent chip from dropping frames when flow control
9402          * is enabled.
9403          */
9404         if (tg3_flag(tp, 57765_CLASS))
9405                 val = 1;
9406         else
9407                 val = 2;
9408         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9409
9410         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9411             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9412                 /* Use hardware link auto-negotiation */
9413                 tg3_flag_set(tp, HW_AUTONEG);
9414         }
9415
9416         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9417             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9418                 u32 tmp;
9419
9420                 tmp = tr32(SERDES_RX_CTRL);
9421                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9422                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9423                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9424                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9425         }
9426
9427         if (!tg3_flag(tp, USE_PHYLIB)) {
9428                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9429                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9430
9431                 err = tg3_setup_phy(tp, 0);
9432                 if (err)
9433                         return err;
9434
9435                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9436                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9437                         u32 tmp;
9438
9439                         /* Clear CRC stats. */
9440                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9441                                 tg3_writephy(tp, MII_TG3_TEST1,
9442                                              tmp | MII_TG3_TEST1_CRC_EN);
9443                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9444                         }
9445                 }
9446         }
9447
9448         __tg3_set_rx_mode(tp->dev);
9449
9450         /* Initialize receive rules. */
9451         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9452         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9453         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9454         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9455
9456         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9457                 limit = 8;
9458         else
9459                 limit = 16;
9460         if (tg3_flag(tp, ENABLE_ASF))
9461                 limit -= 4;
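        /* Each case below deliberately falls through so that every
         * unused rule from slot limit - 1 down to slot 4 is cleared;
         * slots 0 and 1 were programmed above, 2 and 3 are left alone.
         */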
9462         switch (limit) {
9463         case 16:
9464                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9465         case 15:
9466                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9467         case 14:
9468                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9469         case 13:
9470                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9471         case 12:
9472                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9473         case 11:
9474                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9475         case 10:
9476                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9477         case 9:
9478                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9479         case 8:
9480                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9481         case 7:
9482                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9483         case 6:
9484                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9485         case 5:
9486                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9487         case 4:
9488                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9489         case 3:
9490                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9491         case 2:
9492         case 1:
9493
9494         default:
9495                 break;
9496         }
9497
9498         if (tg3_flag(tp, ENABLE_APE))
9499                 /* Write our heartbeat update interval to APE. */
9500                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9501                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9502
9503         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9504
9505         return 0;
9506 }
9507
9508 /* Called at device open time to get the chip ready for
9509  * packet processing.  Invoked with tp->lock held.
9510  */
9511 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9512 {
9513         tg3_switch_clocks(tp);
9514
9515         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9516
9517         return tg3_reset_hw(tp, reset_phy);
9518 }
9519
9520 #if IS_ENABLED(CONFIG_HWMON)
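/* Pull the sensor data records out of the APE scratchpad, zeroing any
 * record whose signature is bad or that is not marked active.
 */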
9521 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9522 {
9523         int i;
9524
9525         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9526                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9527
9528                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9529                 off += len;
9530
9531                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9532                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9533                         memset(ocir, 0, TG3_OCIR_LEN);
9534         }
9535 }
9536
9537 /* sysfs attributes for hwmon */
9538 static ssize_t tg3_show_temp(struct device *dev,
9539                              struct device_attribute *devattr, char *buf)
9540 {
9541         struct pci_dev *pdev = to_pci_dev(dev);
9542         struct net_device *netdev = pci_get_drvdata(pdev);
9543         struct tg3 *tp = netdev_priv(netdev);
9544         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9545         u32 temperature;
9546
9547         spin_lock_bh(&tp->lock);
9548         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9549                                 sizeof(temperature));
9550         spin_unlock_bh(&tp->lock);
9551         return sprintf(buf, "%u\n", temperature);
9552 }
9553
9555 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9556                           TG3_TEMP_SENSOR_OFFSET);
9557 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9558                           TG3_TEMP_CAUTION_OFFSET);
9559 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9560                           TG3_TEMP_MAX_OFFSET);
9561
9562 static struct attribute *tg3_attributes[] = {
9563         &sensor_dev_attr_temp1_input.dev_attr.attr,
9564         &sensor_dev_attr_temp1_crit.dev_attr.attr,
9565         &sensor_dev_attr_temp1_max.dev_attr.attr,
9566         NULL
9567 };
9568
9569 static const struct attribute_group tg3_group = {
9570         .attrs = tg3_attributes,
9571 };
9572
9573 #endif
9574
9575 static void tg3_hwmon_close(struct tg3 *tp)
9576 {
9577 #if IS_ENABLED(CONFIG_HWMON)
9578         if (tp->hwmon_dev) {
9579                 hwmon_device_unregister(tp->hwmon_dev);
9580                 tp->hwmon_dev = NULL;
9581                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9582         }
9583 #endif
9584 }
9585
9586 static void tg3_hwmon_open(struct tg3 *tp)
9587 {
9588 #if IS_ENABLED(CONFIG_HWMON)
9589         int i, err;
9590         u32 size = 0;
9591         struct pci_dev *pdev = tp->pdev;
9592         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9593
9594         tg3_sd_scan_scratchpad(tp, ocirs);
9595
9596         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9597                 if (!ocirs[i].src_data_length)
9598                         continue;
9599
9600                 size += ocirs[i].src_hdr_length;
9601                 size += ocirs[i].src_data_length;
9602         }
9603
9604         if (!size)
9605                 return;
9606
9607         /* Register hwmon sysfs hooks */
9608         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9609         if (err) {
9610                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9611                 return;
9612         }
9613
9614         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9615         if (IS_ERR(tp->hwmon_dev)) {
9616                 tp->hwmon_dev = NULL;
9617                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9618                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9619         }
9620 #endif
9621 }
9622
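/* Accumulate a 32-bit hardware counter into a 64-bit high/low software
 * counter; an overflow of the low word carries into the high word.
 */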
9624 #define TG3_STAT_ADD32(PSTAT, REG) \
9625 do {    u32 __val = tr32(REG); \
9626         (PSTAT)->low += __val; \
9627         if ((PSTAT)->low < __val) \
9628                 (PSTAT)->high += 1; \
9629 } while (0)
9630
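/* Fold the MAC's 32-bit statistics counters into their 64-bit software
 * copies.  Nothing is fetched while the link is down.
 */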
9631 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9632 {
9633         struct tg3_hw_stats *sp = tp->hw_stats;
9634
9635         if (!netif_carrier_ok(tp->dev))
9636                 return;
9637
9638         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9639         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9640         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9641         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9642         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9643         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9644         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9645         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9646         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9647         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9648         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9649         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9650         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9651         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
9652                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9653                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9654                 u32 val;
9655
9656                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9657                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
9658                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9659                 tg3_flag_clear(tp, 5719_RDMA_BUG);
9660         }
9661
9662         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9663         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9664         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9665         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9666         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9667         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9668         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9669         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9670         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9671         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9672         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9673         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9674         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9675         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9676
9677         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9678         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9679             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9680             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9681                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9682         } else {
9683                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9684                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9685                 if (val) {
9686                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9687                         sp->rx_discards.low += val;
9688                         if (sp->rx_discards.low < val)
9689                                 sp->rx_discards.high += 1;
9690                 }
9691                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9692         }
9693         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9694 }
9695
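/* Detect a missed MSI: if a ring still has work pending but its
 * consumer indices have not moved since the last check, invoke the
 * interrupt handler by hand.
 */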
9696 static void tg3_chk_missed_msi(struct tg3 *tp)
9697 {
9698         u32 i;
9699
9700         for (i = 0; i < tp->irq_cnt; i++) {
9701                 struct tg3_napi *tnapi = &tp->napi[i];
9702
9703                 if (tg3_has_work(tnapi)) {
9704                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9705                             tnapi->last_tx_cons == tnapi->tx_cons) {
9706                                 if (tnapi->chk_msi_cnt < 1) {
9707                                         tnapi->chk_msi_cnt++;
9708                                         return;
9709                                 }
9710                                 tg3_msi(0, tnapi);
9711                         }
9712                 }
9713                 tnapi->chk_msi_cnt = 0;
9714                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9715                 tnapi->last_tx_cons = tnapi->tx_cons;
9716         }
9717 }
9718
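/* Per-device periodic timer.  Works around the racy non-tagged status
 * protocol, fetches statistics once per second, and polls PHY/SERDES
 * link state as needed.
 */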
9719 static void tg3_timer(unsigned long __opaque)
9720 {
9721         struct tg3 *tp = (struct tg3 *) __opaque;
9722
9723         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9724                 goto restart_timer;
9725
9726         spin_lock(&tp->lock);
9727
9728         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9729             tg3_flag(tp, 57765_CLASS))
9730                 tg3_chk_missed_msi(tp);
9731
9732         if (!tg3_flag(tp, TAGGED_STATUS)) {
9733                 /* All of this garbage is because, when using non-tagged
9734                  * IRQ status, the mailbox/status_block protocol the chip
9735                  * uses with the CPU is race prone.
9736                  */
9737                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9738                         tw32(GRC_LOCAL_CTRL,
9739                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9740                 } else {
9741                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9742                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9743                 }
9744
9745                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9746                         spin_unlock(&tp->lock);
9747                         tg3_reset_task_schedule(tp);
9748                         goto restart_timer;
9749                 }
9750         }
9751
9752         /* This part only runs once per second. */
9753         if (!--tp->timer_counter) {
9754                 if (tg3_flag(tp, 5705_PLUS))
9755                         tg3_periodic_fetch_stats(tp);
9756
9757                 if (tp->setlpicnt && !--tp->setlpicnt)
9758                         tg3_phy_eee_enable(tp);
9759
9760                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9761                         u32 mac_stat;
9762                         int phy_event;
9763
9764                         mac_stat = tr32(MAC_STATUS);
9765
9766                         phy_event = 0;
9767                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9768                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9769                                         phy_event = 1;
9770                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9771                                 phy_event = 1;
9772
9773                         if (phy_event)
9774                                 tg3_setup_phy(tp, 0);
9775                 } else if (tg3_flag(tp, POLL_SERDES)) {
9776                         u32 mac_stat = tr32(MAC_STATUS);
9777                         int need_setup = 0;
9778
9779                         if (netif_carrier_ok(tp->dev) &&
9780                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9781                                 need_setup = 1;
9782                         }
9783                         if (!netif_carrier_ok(tp->dev) &&
9784                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9785                                          MAC_STATUS_SIGNAL_DET))) {
9786                                 need_setup = 1;
9787                         }
9788                         if (need_setup) {
9789                                 if (!tp->serdes_counter) {
9790                                         tw32_f(MAC_MODE,
9791                                              (tp->mac_mode &
9792                                               ~MAC_MODE_PORT_MODE_MASK));
9793                                         udelay(40);
9794                                         tw32_f(MAC_MODE, tp->mac_mode);
9795                                         udelay(40);
9796                                 }
9797                                 tg3_setup_phy(tp, 0);
9798                         }
9799                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9800                            tg3_flag(tp, 5780_CLASS)) {
9801                         tg3_serdes_parallel_detect(tp);
9802                 }
9803
9804                 tp->timer_counter = tp->timer_multiplier;
9805         }
9806
9807         /* Heartbeat is only sent once every 2 seconds.
9808          *
9809          * The heartbeat is to tell the ASF firmware that the host
9810          * driver is still alive.  In the event that the OS crashes,
9811          * ASF needs to reset the hardware to free up the FIFO space
9812          * that may be filled with rx packets destined for the host.
9813          * If the FIFO is full, ASF will no longer function properly.
9814          *
9815          * Unintended resets have been reported on real time kernels
9816          * where the timer doesn't run on time.  Netpoll will have the
9817          * same problem.
9818          *
9819          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9820          * to check the ring condition when the heartbeat is expiring
9821          * before doing the reset.  This will prevent most unintended
9822          * resets.
9823          */
9824         if (!--tp->asf_counter) {
9825                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9826                         tg3_wait_for_event_ack(tp);
9827
9828                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9829                                       FWCMD_NICDRV_ALIVE3);
9830                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9831                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9832                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9833
9834                         tg3_generate_fw_event(tp);
9835                 }
9836                 tp->asf_counter = tp->asf_multiplier;
9837         }
9838
9839         spin_unlock(&tp->lock);
9840
9841 restart_timer:
9842         tp->timer.expires = jiffies + tp->timer_offset;
9843         add_timer(&tp->timer);
9844 }
9845
9846 static void __devinit tg3_timer_init(struct tg3 *tp)
9847 {
9848         if (tg3_flag(tp, TAGGED_STATUS) &&
9849             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9850             !tg3_flag(tp, 57765_CLASS))
9851                 tp->timer_offset = HZ;
9852         else
9853                 tp->timer_offset = HZ / 10;
9854
9855         BUG_ON(tp->timer_offset > HZ);
9856
9857         tp->timer_multiplier = (HZ / tp->timer_offset);
9858         tp->asf_multiplier = (HZ / tp->timer_offset) *
9859                              TG3_FW_UPDATE_FREQ_SEC;
9860
9861         init_timer(&tp->timer);
9862         tp->timer.data = (unsigned long) tp;
9863         tp->timer.function = tg3_timer;
9864 }
9865
9866 static void tg3_timer_start(struct tg3 *tp)
9867 {
9868         tp->asf_counter   = tp->asf_multiplier;
9869         tp->timer_counter = tp->timer_multiplier;
9870
9871         tp->timer.expires = jiffies + tp->timer_offset;
9872         add_timer(&tp->timer);
9873 }
9874
9875 static void tg3_timer_stop(struct tg3 *tp)
9876 {
9877         del_timer_sync(&tp->timer);
9878 }
9879
9880 /* Restart hardware after configuration changes, self-test, etc.
9881  * Invoked with tp->lock held.
9882  */
9883 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9884         __releases(tp->lock)
9885         __acquires(tp->lock)
9886 {
9887         int err;
9888
9889         err = tg3_init_hw(tp, reset_phy);
9890         if (err) {
9891                 netdev_err(tp->dev,
9892                            "Failed to re-initialize device, aborting\n");
9893                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9894                 tg3_full_unlock(tp);
9895                 tg3_timer_stop(tp);
9896                 tp->irq_sync = 0;
9897                 tg3_napi_enable(tp);
9898                 dev_close(tp->dev);
9899                 tg3_full_lock(tp, 0);
9900         }
9901         return err;
9902 }
9903
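/* Process-context reset worker, scheduled via tg3_reset_task_schedule()
 * from paths (such as the timer above) that detect the chip needs a
 * full halt and re-init.
 */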
9904 static void tg3_reset_task(struct work_struct *work)
9905 {
9906         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9907         int err;
9908
9909         tg3_full_lock(tp, 0);
9910
9911         if (!netif_running(tp->dev)) {
9912                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9913                 tg3_full_unlock(tp);
9914                 return;
9915         }
9916
9917         tg3_full_unlock(tp);
9918
9919         tg3_phy_stop(tp);
9920
9921         tg3_netif_stop(tp);
9922
9923         tg3_full_lock(tp, 1);
9924
9925         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9926                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9927                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9928                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9929                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9930         }
9931
9932         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9933         err = tg3_init_hw(tp, 1);
9934         if (err)
9935                 goto out;
9936
9937         tg3_netif_start(tp);
9938
9939 out:
9940         tg3_full_unlock(tp);
9941
9942         if (!err)
9943                 tg3_phy_start(tp);
9944
9945         tg3_flag_clear(tp, RESET_TASK_PENDING);
9946 }
9947
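/* Select the proper top-half handler for the current interrupt scheme
 * and register it.  A single vector borrows the netdev name verbatim;
 * with multiple vectors each gets its own "<ifname>-<n>" label
 * (e.g. "eth0-0", "eth0-1", ... for a hypothetical eth0).
 */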
9948 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9949 {
9950         irq_handler_t fn;
9951         unsigned long flags;
9952         char *name;
9953         struct tg3_napi *tnapi = &tp->napi[irq_num];
9954
9955         if (tp->irq_cnt == 1)
9956                 name = tp->dev->name;
9957         else {
9958                 name = &tnapi->irq_lbl[0];
9959                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9960                 name[IFNAMSIZ-1] = 0;
9961         }
9962
9963         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9964                 fn = tg3_msi;
9965                 if (tg3_flag(tp, 1SHOT_MSI))
9966                         fn = tg3_msi_1shot;
9967                 flags = 0;
9968         } else {
9969                 fn = tg3_interrupt;
9970                 if (tg3_flag(tp, TAGGED_STATUS))
9971                         fn = tg3_interrupt_tagged;
9972                 flags = IRQF_SHARED;
9973         }
9974
9975         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9976 }
9977
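/* Generate an interrupt through the host coalescing block and poll the
 * interrupt mailbox (or the masked-PCI-INT bit) for up to ~50ms to see
 * whether it was actually delivered.  Used by tg3_test_msi() below.
 */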
9978 static int tg3_test_interrupt(struct tg3 *tp)
9979 {
9980         struct tg3_napi *tnapi = &tp->napi[0];
9981         struct net_device *dev = tp->dev;
9982         int err, i, intr_ok = 0;
9983         u32 val;
9984
9985         if (!netif_running(dev))
9986                 return -ENODEV;
9987
9988         tg3_disable_ints(tp);
9989
9990         free_irq(tnapi->irq_vec, tnapi);
9991
9992         /*
9993          * Turn off MSI one shot mode.  Otherwise this test has no
9994          * observable way to know whether the interrupt was delivered.
9995          */
9996         if (tg3_flag(tp, 57765_PLUS)) {
9997                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9998                 tw32(MSGINT_MODE, val);
9999         }
10000
10001         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10002                           IRQF_SHARED, dev->name, tnapi);
10003         if (err)
10004                 return err;
10005
10006         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10007         tg3_enable_ints(tp);
10008
10009         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10010                tnapi->coal_now);
10011
10012         for (i = 0; i < 5; i++) {
10013                 u32 int_mbox, misc_host_ctrl;
10014
10015                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10016                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10017
10018                 if ((int_mbox != 0) ||
10019                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10020                         intr_ok = 1;
10021                         break;
10022                 }
10023
10024                 if (tg3_flag(tp, 57765_PLUS) &&
10025                     tnapi->hw_status->status_tag != tnapi->last_tag)
10026                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10027
10028                 msleep(10);
10029         }
10030
10031         tg3_disable_ints(tp);
10032
10033         free_irq(tnapi->irq_vec, tnapi);
10034
10035         err = tg3_request_irq(tp, 0);
10036
10037         if (err)
10038                 return err;
10039
10040         if (intr_ok) {
10041                 /* Re-enable MSI one-shot mode. */
10042                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10043                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10044                         tw32(MSGINT_MODE, val);
10045                 }
10046                 return 0;
10047         }
10048
10049         return -EIO;
10050 }
10051
10052 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
10053  * INTx mode is successfully restored.
10054  */
10055 static int tg3_test_msi(struct tg3 *tp)
10056 {
10057         int err;
10058         u16 pci_cmd;
10059
10060         if (!tg3_flag(tp, USING_MSI))
10061                 return 0;
10062
10063         /* Turn off SERR reporting in case MSI terminates with Master
10064          * Abort.
10065          */
10066         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10067         pci_write_config_word(tp->pdev, PCI_COMMAND,
10068                               pci_cmd & ~PCI_COMMAND_SERR);
10069
10070         err = tg3_test_interrupt(tp);
10071
10072         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10073
10074         if (!err)
10075                 return 0;
10076
10077         /* other failures */
10078         if (err != -EIO)
10079                 return err;
10080
10081         /* MSI test failed, go back to INTx mode */
10082         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10083                     "to INTx mode. Please report this failure to the PCI "
10084                     "maintainer and include system chipset information\n");
10085
10086         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10087
10088         pci_disable_msi(tp->pdev);
10089
10090         tg3_flag_clear(tp, USING_MSI);
10091         tp->napi[0].irq_vec = tp->pdev->irq;
10092
10093         err = tg3_request_irq(tp, 0);
10094         if (err)
10095                 return err;
10096
10097         /* Need to reset the chip because the MSI cycle may have terminated
10098          * with Master Abort.
10099          */
10100         tg3_full_lock(tp, 1);
10101
10102         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10103         err = tg3_init_hw(tp, 1);
10104
10105         tg3_full_unlock(tp);
10106
10107         if (err)
10108                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10109
10110         return err;
10111 }
10112
10113 static int tg3_request_firmware(struct tg3 *tp)
10114 {
10115         const __be32 *fw_data;
10116
10117         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10118                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10119                            tp->fw_needed);
10120                 return -ENOENT;
10121         }
10122
10123         fw_data = (void *)tp->fw->data;
10124
10125         /* Firmware blob starts with version numbers, followed by
10126          * start address and _full_ length including BSS sections
10127          * (which must be longer than the actual data, of course).
10128          */
10129
10130         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10131         if (tp->fw_len < (tp->fw->size - 12)) {
10132                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10133                            tp->fw_len, tp->fw_needed);
10134                 release_firmware(tp->fw);
10135                 tp->fw = NULL;
10136                 return -EINVAL;
10137         }
10138
10139         /* We no longer need firmware; we have it. */
10140         tp->fw_needed = NULL;
10141         return 0;
10142 }
10143
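/* MSI-X setup: ask for one vector per RSS rx queue plus one, since the
 * first vector handles only link and error events.  If the PCI core can
 * only grant a smaller count, retry with that count before giving up
 * and letting the caller fall back to MSI or INTx.
 */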
10144 static bool tg3_enable_msix(struct tg3 *tp)
10145 {
10146         int i, rc;
10147         struct msix_entry msix_ent[tp->irq_max];
10148
10149         tp->irq_cnt = netif_get_num_default_rss_queues();
10150         if (tp->irq_cnt > 1) {
10151                 /* We want as many rx rings enabled as there are cpus.
10152                  * In multiqueue MSI-X mode, the first MSI-X vector
10153                  * only deals with link interrupts, etc, so we add
10154                  * one to the number of vectors we are requesting.
10155                  */
10156                 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
10157         }
10158
10159         for (i = 0; i < tp->irq_max; i++) {
10160                 msix_ent[i].entry  = i;
10161                 msix_ent[i].vector = 0;
10162         }
10163
10164         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10165         if (rc < 0) {
10166                 return false;
10167         } else if (rc != 0) {
10168                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10169                         return false;
10170                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10171                               tp->irq_cnt, rc);
10172                 tp->irq_cnt = rc;
10173         }
10174
10175         for (i = 0; i < tp->irq_max; i++)
10176                 tp->napi[i].irq_vec = msix_ent[i].vector;
10177
10178         netif_set_real_num_tx_queues(tp->dev, 1);
10179         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
10180         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
10181                 pci_disable_msix(tp->pdev);
10182                 return false;
10183         }
10184
10185         if (tp->irq_cnt > 1) {
10186                 tg3_flag_set(tp, ENABLE_RSS);
10187
10188                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
10189                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
10190                         tg3_flag_set(tp, ENABLE_TSS);
10191                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
10192                 }
10193         }
10194
10195         return true;
10196 }
10197
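/* Pick the interrupt scheme in order of preference (MSI-X, then MSI,
 * then legacy INTx), program MSGINT_MODE accordingly, and record the
 * final vector/queue counts.
 */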
10198 static void tg3_ints_init(struct tg3 *tp)
10199 {
10200         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10201             !tg3_flag(tp, TAGGED_STATUS)) {
10202                 /* All MSI-supporting chips should support tagged
10203                  * status; warn and fall back to INTx if not.
10204                  */
10205                 netdev_warn(tp->dev,
10206                             "MSI without TAGGED_STATUS? Not using MSI\n");
10207                 goto defcfg;
10208         }
10209
10210         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10211                 tg3_flag_set(tp, USING_MSIX);
10212         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10213                 tg3_flag_set(tp, USING_MSI);
10214
10215         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10216                 u32 msi_mode = tr32(MSGINT_MODE);
10217                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10218                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10219                 if (!tg3_flag(tp, 1SHOT_MSI))
10220                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10221                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10222         }
10223 defcfg:
10224         if (!tg3_flag(tp, USING_MSIX)) {
10225                 tp->irq_cnt = 1;
10226                 tp->napi[0].irq_vec = tp->pdev->irq;
10227                 netif_set_real_num_tx_queues(tp->dev, 1);
10228                 netif_set_real_num_rx_queues(tp->dev, 1);
10229         }
10230 }
10231
10232 static void tg3_ints_fini(struct tg3 *tp)
10233 {
10234         if (tg3_flag(tp, USING_MSIX))
10235                 pci_disable_msix(tp->pdev);
10236         else if (tg3_flag(tp, USING_MSI))
10237                 pci_disable_msi(tp->pdev);
10238         tg3_flag_clear(tp, USING_MSI);
10239         tg3_flag_clear(tp, USING_MSIX);
10240         tg3_flag_clear(tp, ENABLE_RSS);
10241         tg3_flag_clear(tp, ENABLE_TSS);
10242 }
10243
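/* ndo_open: the ordering below matters.  Interrupts are configured
 * first so tg3_alloc_consistent() and the NAPI setup know how many
 * vectors exist; only after the hardware is initialized and (for MSI)
 * interrupt delivery has been proven does the timer start and the
 * device go live.
 */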
10244 static int tg3_open(struct net_device *dev)
10245 {
10246         struct tg3 *tp = netdev_priv(dev);
10247         int i, err;
10248
10249         if (tp->fw_needed) {
10250                 err = tg3_request_firmware(tp);
10251                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10252                         if (err)
10253                                 return err;
10254                 } else if (err) {
10255                         netdev_warn(tp->dev, "TSO capability disabled\n");
10256                         tg3_flag_clear(tp, TSO_CAPABLE);
10257                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10258                         netdev_notice(tp->dev, "TSO capability restored\n");
10259                         tg3_flag_set(tp, TSO_CAPABLE);
10260                 }
10261         }
10262
10263         netif_carrier_off(tp->dev);
10264
10265         err = tg3_power_up(tp);
10266         if (err)
10267                 return err;
10268
10269         tg3_full_lock(tp, 0);
10270
10271         tg3_disable_ints(tp);
10272         tg3_flag_clear(tp, INIT_COMPLETE);
10273
10274         tg3_full_unlock(tp);
10275
10276         /*
10277          * Set up interrupts first so we know how
10278          * many NAPI resources to allocate.
10279          */
10280         tg3_ints_init(tp);
10281
10282         tg3_rss_check_indir_tbl(tp);
10283
10284         /* The placement of this call is tied
10285          * to the setup and use of Host TX descriptors.
10286          */
10287         err = tg3_alloc_consistent(tp);
10288         if (err)
10289                 goto err_out1;
10290
10291         tg3_napi_init(tp);
10292
10293         tg3_napi_enable(tp);
10294
10295         for (i = 0; i < tp->irq_cnt; i++) {
10296                 struct tg3_napi *tnapi = &tp->napi[i];
10297                 err = tg3_request_irq(tp, i);
10298                 if (err) {
10299                         for (i--; i >= 0; i--) {
10300                                 tnapi = &tp->napi[i];
10301                                 free_irq(tnapi->irq_vec, tnapi);
10302                         }
10303                         goto err_out2;
10304                 }
10305         }
10306
10307         tg3_full_lock(tp, 0);
10308
10309         err = tg3_init_hw(tp, 1);
10310         if (err) {
10311                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10312                 tg3_free_rings(tp);
10313         }
10314
10315         tg3_full_unlock(tp);
10316
10317         if (err)
10318                 goto err_out3;
10319
10320         if (tg3_flag(tp, USING_MSI)) {
10321                 err = tg3_test_msi(tp);
10322
10323                 if (err) {
10324                         tg3_full_lock(tp, 0);
10325                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10326                         tg3_free_rings(tp);
10327                         tg3_full_unlock(tp);
10328
10329                         goto err_out2;
10330                 }
10331
10332                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10333                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10334
10335                         tw32(PCIE_TRANSACTION_CFG,
10336                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10337                 }
10338         }
10339
10340         tg3_phy_start(tp);
10341
10342         tg3_hwmon_open(tp);
10343
10344         tg3_full_lock(tp, 0);
10345
10346         tg3_timer_start(tp);
10347         tg3_flag_set(tp, INIT_COMPLETE);
10348         tg3_enable_ints(tp);
10349
10350         tg3_full_unlock(tp);
10351
10352         netif_tx_start_all_queues(dev);
10353
10354         /*
10355          * Reset the loopback feature if it was turned on while the
10356          * device was down; make sure it is configured properly now.
10357          */
10358         if (dev->features & NETIF_F_LOOPBACK)
10359                 tg3_set_loopback(dev, dev->features);
10360
10361         return 0;
10362
10363 err_out3:
10364         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10365                 struct tg3_napi *tnapi = &tp->napi[i];
10366                 free_irq(tnapi->irq_vec, tnapi);
10367         }
10368
10369 err_out2:
10370         tg3_napi_disable(tp);
10371         tg3_napi_fini(tp);
10372         tg3_free_consistent(tp);
10373
10374 err_out1:
10375         tg3_ints_fini(tp);
10376         tg3_frob_aux_power(tp, false);
10377         pci_set_power_state(tp->pdev, PCI_D3hot);
10378         return err;
10379 }
10380
10381 static int tg3_close(struct net_device *dev)
10382 {
10383         int i;
10384         struct tg3 *tp = netdev_priv(dev);
10385
10386         tg3_napi_disable(tp);
10387         tg3_reset_task_cancel(tp);
10388
10389         netif_tx_stop_all_queues(dev);
10390
10391         tg3_timer_stop(tp);
10392
10393         tg3_hwmon_close(tp);
10394
10395         tg3_phy_stop(tp);
10396
10397         tg3_full_lock(tp, 1);
10398
10399         tg3_disable_ints(tp);
10400
10401         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10402         tg3_free_rings(tp);
10403         tg3_flag_clear(tp, INIT_COMPLETE);
10404
10405         tg3_full_unlock(tp);
10406
10407         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10408                 struct tg3_napi *tnapi = &tp->napi[i];
10409                 free_irq(tnapi->irq_vec, tnapi);
10410         }
10411
10412         tg3_ints_fini(tp);
10413
10414         /* Clear stats across close / open calls */
10415         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10416         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10417
10418         tg3_napi_fini(tp);
10419
10420         tg3_free_consistent(tp);
10421
10422         tg3_power_down(tp);
10423
10424         netif_carrier_off(tp->dev);
10425
10426         return 0;
10427 }
10428
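/* Hardware counters are 64 bits wide but stored as two 32-bit words;
 * stitch them back together.
 */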
10429 static inline u64 get_stat64(tg3_stat64_t *val)
10430 {
10431         return ((u64)val->high << 32) | ((u64)val->low);
10432 }
10433
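/* On 5700/5701 with a copper PHY the CRC error count comes from the PHY
 * (TEST1 enables the counter, MII_TG3_RXR_COUNTERS reads it) instead of
 * the MAC statistics block.  The running total is kept in software,
 * which suggests the PHY counter clears on read (an inference from the
 * accumulate-on-read pattern below, not a documented guarantee).
 */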
10434 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10435 {
10436         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10437
10438         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10439             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10440              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10441                 u32 val;
10442
10443                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10444                         tg3_writephy(tp, MII_TG3_TEST1,
10445                                      val | MII_TG3_TEST1_CRC_EN);
10446                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10447                 } else
10448                         val = 0;
10449
10450                 tp->phy_crc_errors += val;
10451
10452                 return tp->phy_crc_errors;
10453         }
10454
10455         return get_stat64(&hw_stats->rx_fcs_errors);
10456 }
10457
10458 #define ESTAT_ADD(member) \
10459         estats->member =        old_estats->member + \
10460                                 get_stat64(&hw_stats->member)
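/* For illustration, ESTAT_ADD(rx_octets) expands to:
 *
 *      estats->rx_octets = old_estats->rx_octets +
 *                          get_stat64(&hw_stats->rx_octets);
 *
 * so each ethtool counter below is the saved pre-reset total plus the
 * current hardware count.
 */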
10461
10462 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10463 {
10464         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10465         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10466
10467         ESTAT_ADD(rx_octets);
10468         ESTAT_ADD(rx_fragments);
10469         ESTAT_ADD(rx_ucast_packets);
10470         ESTAT_ADD(rx_mcast_packets);
10471         ESTAT_ADD(rx_bcast_packets);
10472         ESTAT_ADD(rx_fcs_errors);
10473         ESTAT_ADD(rx_align_errors);
10474         ESTAT_ADD(rx_xon_pause_rcvd);
10475         ESTAT_ADD(rx_xoff_pause_rcvd);
10476         ESTAT_ADD(rx_mac_ctrl_rcvd);
10477         ESTAT_ADD(rx_xoff_entered);
10478         ESTAT_ADD(rx_frame_too_long_errors);
10479         ESTAT_ADD(rx_jabbers);
10480         ESTAT_ADD(rx_undersize_packets);
10481         ESTAT_ADD(rx_in_length_errors);
10482         ESTAT_ADD(rx_out_length_errors);
10483         ESTAT_ADD(rx_64_or_less_octet_packets);
10484         ESTAT_ADD(rx_65_to_127_octet_packets);
10485         ESTAT_ADD(rx_128_to_255_octet_packets);
10486         ESTAT_ADD(rx_256_to_511_octet_packets);
10487         ESTAT_ADD(rx_512_to_1023_octet_packets);
10488         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10489         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10490         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10491         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10492         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10493
10494         ESTAT_ADD(tx_octets);
10495         ESTAT_ADD(tx_collisions);
10496         ESTAT_ADD(tx_xon_sent);
10497         ESTAT_ADD(tx_xoff_sent);
10498         ESTAT_ADD(tx_flow_control);
10499         ESTAT_ADD(tx_mac_errors);
10500         ESTAT_ADD(tx_single_collisions);
10501         ESTAT_ADD(tx_mult_collisions);
10502         ESTAT_ADD(tx_deferred);
10503         ESTAT_ADD(tx_excessive_collisions);
10504         ESTAT_ADD(tx_late_collisions);
10505         ESTAT_ADD(tx_collide_2times);
10506         ESTAT_ADD(tx_collide_3times);
10507         ESTAT_ADD(tx_collide_4times);
10508         ESTAT_ADD(tx_collide_5times);
10509         ESTAT_ADD(tx_collide_6times);
10510         ESTAT_ADD(tx_collide_7times);
10511         ESTAT_ADD(tx_collide_8times);
10512         ESTAT_ADD(tx_collide_9times);
10513         ESTAT_ADD(tx_collide_10times);
10514         ESTAT_ADD(tx_collide_11times);
10515         ESTAT_ADD(tx_collide_12times);
10516         ESTAT_ADD(tx_collide_13times);
10517         ESTAT_ADD(tx_collide_14times);
10518         ESTAT_ADD(tx_collide_15times);
10519         ESTAT_ADD(tx_ucast_packets);
10520         ESTAT_ADD(tx_mcast_packets);
10521         ESTAT_ADD(tx_bcast_packets);
10522         ESTAT_ADD(tx_carrier_sense_errors);
10523         ESTAT_ADD(tx_discards);
10524         ESTAT_ADD(tx_errors);
10525
10526         ESTAT_ADD(dma_writeq_full);
10527         ESTAT_ADD(dma_write_prioq_full);
10528         ESTAT_ADD(rxbds_empty);
10529         ESTAT_ADD(rx_discards);
10530         ESTAT_ADD(rx_errors);
10531         ESTAT_ADD(rx_threshold_hit);
10532
10533         ESTAT_ADD(dma_readq_full);
10534         ESTAT_ADD(dma_read_prioq_full);
10535         ESTAT_ADD(tx_comp_queue_full);
10536
10537         ESTAT_ADD(ring_set_send_prod_index);
10538         ESTAT_ADD(ring_status_update);
10539         ESTAT_ADD(nic_irqs);
10540         ESTAT_ADD(nic_avoided_irqs);
10541         ESTAT_ADD(nic_tx_threshold_hit);
10542
10543         ESTAT_ADD(mbuf_lwm_thresh_hit);
10544 }
10545
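/* Fill the standard rtnl_link_stats64 counters from the live hardware
 * statistics block, on top of net_stats_prev, the totals carried over
 * from before the counters were last reset (tg3_close zeroes it).
 */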
10546 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10547 {
10548         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10549         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10550
10551         stats->rx_packets = old_stats->rx_packets +
10552                 get_stat64(&hw_stats->rx_ucast_packets) +
10553                 get_stat64(&hw_stats->rx_mcast_packets) +
10554                 get_stat64(&hw_stats->rx_bcast_packets);
10555
10556         stats->tx_packets = old_stats->tx_packets +
10557                 get_stat64(&hw_stats->tx_ucast_packets) +
10558                 get_stat64(&hw_stats->tx_mcast_packets) +
10559                 get_stat64(&hw_stats->tx_bcast_packets);
10560
10561         stats->rx_bytes = old_stats->rx_bytes +
10562                 get_stat64(&hw_stats->rx_octets);
10563         stats->tx_bytes = old_stats->tx_bytes +
10564                 get_stat64(&hw_stats->tx_octets);
10565
10566         stats->rx_errors = old_stats->rx_errors +
10567                 get_stat64(&hw_stats->rx_errors);
10568         stats->tx_errors = old_stats->tx_errors +
10569                 get_stat64(&hw_stats->tx_errors) +
10570                 get_stat64(&hw_stats->tx_mac_errors) +
10571                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10572                 get_stat64(&hw_stats->tx_discards);
10573
10574         stats->multicast = old_stats->multicast +
10575                 get_stat64(&hw_stats->rx_mcast_packets);
10576         stats->collisions = old_stats->collisions +
10577                 get_stat64(&hw_stats->tx_collisions);
10578
10579         stats->rx_length_errors = old_stats->rx_length_errors +
10580                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10581                 get_stat64(&hw_stats->rx_undersize_packets);
10582
10583         stats->rx_over_errors = old_stats->rx_over_errors +
10584                 get_stat64(&hw_stats->rxbds_empty);
10585         stats->rx_frame_errors = old_stats->rx_frame_errors +
10586                 get_stat64(&hw_stats->rx_align_errors);
10587         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10588                 get_stat64(&hw_stats->tx_discards);
10589         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10590                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10591
10592         stats->rx_crc_errors = old_stats->rx_crc_errors +
10593                 tg3_calc_crc_errors(tp);
10594
10595         stats->rx_missed_errors = old_stats->rx_missed_errors +
10596                 get_stat64(&hw_stats->rx_discards);
10597
10598         stats->rx_dropped = tp->rx_dropped;
10599         stats->tx_dropped = tp->tx_dropped;
10600 }
10601
10602 static int tg3_get_regs_len(struct net_device *dev)
10603 {
10604         return TG3_REG_BLK_SIZE;
10605 }
10606
10607 static void tg3_get_regs(struct net_device *dev,
10608                 struct ethtool_regs *regs, void *_p)
10609 {
10610         struct tg3 *tp = netdev_priv(dev);
10611
10612         regs->version = 0;
10613
10614         memset(_p, 0, TG3_REG_BLK_SIZE);
10615
10616         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10617                 return;
10618
10619         tg3_full_lock(tp, 0);
10620
10621         tg3_dump_legacy_regs(tp, (u32 *)_p);
10622
10623         tg3_full_unlock(tp);
10624 }
10625
10626 static int tg3_get_eeprom_len(struct net_device *dev)
10627 {
10628         struct tg3 *tp = netdev_priv(dev);
10629
10630         return tp->nvram_size;
10631 }
10632
10633 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10634 {
10635         struct tg3 *tp = netdev_priv(dev);
10636         int ret;
10637         u8  *pd;
10638         u32 i, offset, len, b_offset, b_count;
10639         __be32 val;
10640
10641         if (tg3_flag(tp, NO_NVRAM))
10642                 return -EINVAL;
10643
10644         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10645                 return -EAGAIN;
10646
10647         offset = eeprom->offset;
10648         len = eeprom->len;
10649         eeprom->len = 0;
10650
10651         eeprom->magic = TG3_EEPROM_MAGIC;
10652
10653         if (offset & 3) {
10654                 /* adjustments to start on required 4 byte boundary */
10655                 b_offset = offset & 3;
10656                 b_count = 4 - b_offset;
10657                 if (b_count > len) {
10658                         /* i.e. offset=1 len=2 */
10659                         b_count = len;
10660                 }
10661                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10662                 if (ret)
10663                         return ret;
10664                 memcpy(data, ((char *)&val) + b_offset, b_count);
10665                 len -= b_count;
10666                 offset += b_count;
10667                 eeprom->len += b_count;
10668         }
10669
10670         /* read bytes up to the last 4 byte boundary */
10671         pd = &data[eeprom->len];
10672         for (i = 0; i < (len - (len & 3)); i += 4) {
10673                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10674                 if (ret) {
10675                         eeprom->len += i;
10676                         return ret;
10677                 }
10678                 memcpy(pd + i, &val, 4);
10679         }
10680         eeprom->len += i;
10681
10682         if (len & 3) {
10683                 /* read last bytes not ending on 4 byte boundary */
10684                 pd = &data[eeprom->len];
10685                 b_count = len & 3;
10686                 b_offset = offset + len - b_count;
10687                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10688                 if (ret)
10689                         return ret;
10690                 memcpy(pd, &val, b_count);
10691                 eeprom->len += b_count;
10692         }
10693         return 0;
10694 }
10695
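/* NVRAM writes must be whole, 4-byte-aligned words, so a request with a
 * ragged head or tail is widened by read-modify-write.  For example,
 * offset=1 len=2 becomes a 4-byte write at offset 0 in which byte 0 and
 * byte 3 are preserved from a prior read and bytes 1-2 carry the
 * caller's data.
 */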
10696 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10697 {
10698         struct tg3 *tp = netdev_priv(dev);
10699         int ret;
10700         u32 offset, len, b_offset, odd_len;
10701         u8 *buf;
10702         __be32 start, end;
10703
10704         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10705                 return -EAGAIN;
10706
10707         if (tg3_flag(tp, NO_NVRAM) ||
10708             eeprom->magic != TG3_EEPROM_MAGIC)
10709                 return -EINVAL;
10710
10711         offset = eeprom->offset;
10712         len = eeprom->len;
10713
10714         if ((b_offset = (offset & 3))) {
10715                 /* adjustments to start on required 4 byte boundary */
10716                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10717                 if (ret)
10718                         return ret;
10719                 len += b_offset;
10720                 offset &= ~3;
10721                 if (len < 4)
10722                         len = 4;
10723         }
10724
10725         odd_len = 0;
10726         if (len & 3) {
10727                 /* adjustments to end on required 4 byte boundary */
10728                 odd_len = 1;
10729                 len = (len + 3) & ~3;
10730                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10731                 if (ret)
10732                         return ret;
10733         }
10734
10735         buf = data;
10736         if (b_offset || odd_len) {
10737                 buf = kmalloc(len, GFP_KERNEL);
10738                 if (!buf)
10739                         return -ENOMEM;
10740                 if (b_offset)
10741                         memcpy(buf, &start, 4);
10742                 if (odd_len)
10743                         memcpy(buf+len-4, &end, 4);
10744                 memcpy(buf + b_offset, data, eeprom->len);
10745         }
10746
10747         ret = tg3_nvram_write_block(tp, offset, len, buf);
10748
10749         if (buf != data)
10750                 kfree(buf);
10751
10752         return ret;
10753 }
10754
10755 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10756 {
10757         struct tg3 *tp = netdev_priv(dev);
10758
10759         if (tg3_flag(tp, USE_PHYLIB)) {
10760                 struct phy_device *phydev;
10761                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10762                         return -EAGAIN;
10763                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10764                 return phy_ethtool_gset(phydev, cmd);
10765         }
10766
10767         cmd->supported = (SUPPORTED_Autoneg);
10768
10769         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10770                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10771                                    SUPPORTED_1000baseT_Full);
10772
10773         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10774                 cmd->supported |= (SUPPORTED_100baseT_Half |
10775                                   SUPPORTED_100baseT_Full |
10776                                   SUPPORTED_10baseT_Half |
10777                                   SUPPORTED_10baseT_Full |
10778                                   SUPPORTED_TP);
10779                 cmd->port = PORT_TP;
10780         } else {
10781                 cmd->supported |= SUPPORTED_FIBRE;
10782                 cmd->port = PORT_FIBRE;
10783         }
10784
10785         cmd->advertising = tp->link_config.advertising;
10786         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10787                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10788                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10789                                 cmd->advertising |= ADVERTISED_Pause;
10790                         } else {
10791                                 cmd->advertising |= ADVERTISED_Pause |
10792                                                     ADVERTISED_Asym_Pause;
10793                         }
10794                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10795                         cmd->advertising |= ADVERTISED_Asym_Pause;
10796                 }
10797         }
10798         if (netif_running(dev) && netif_carrier_ok(dev)) {
10799                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10800                 cmd->duplex = tp->link_config.active_duplex;
10801                 cmd->lp_advertising = tp->link_config.rmt_adv;
10802                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10803                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10804                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10805                         else
10806                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10807                 }
10808         } else {
10809                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10810                 cmd->duplex = DUPLEX_UNKNOWN;
10811                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10812         }
10813         cmd->phy_address = tp->phy_addr;
10814         cmd->transceiver = XCVR_INTERNAL;
10815         cmd->autoneg = tp->link_config.autoneg;
10816         cmd->maxtxpkt = 0;
10817         cmd->maxrxpkt = 0;
10818         return 0;
10819 }
10820
10821 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10822 {
10823         struct tg3 *tp = netdev_priv(dev);
10824         u32 speed = ethtool_cmd_speed(cmd);
10825
10826         if (tg3_flag(tp, USE_PHYLIB)) {
10827                 struct phy_device *phydev;
10828                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10829                         return -EAGAIN;
10830                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10831                 return phy_ethtool_sset(phydev, cmd);
10832         }
10833
10834         if (cmd->autoneg != AUTONEG_ENABLE &&
10835             cmd->autoneg != AUTONEG_DISABLE)
10836                 return -EINVAL;
10837
10838         if (cmd->autoneg == AUTONEG_DISABLE &&
10839             cmd->duplex != DUPLEX_FULL &&
10840             cmd->duplex != DUPLEX_HALF)
10841                 return -EINVAL;
10842
10843         if (cmd->autoneg == AUTONEG_ENABLE) {
10844                 u32 mask = ADVERTISED_Autoneg |
10845                            ADVERTISED_Pause |
10846                            ADVERTISED_Asym_Pause;
10847
10848                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10849                         mask |= ADVERTISED_1000baseT_Half |
10850                                 ADVERTISED_1000baseT_Full;
10851
10852                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10853                         mask |= ADVERTISED_100baseT_Half |
10854                                 ADVERTISED_100baseT_Full |
10855                                 ADVERTISED_10baseT_Half |
10856                                 ADVERTISED_10baseT_Full |
10857                                 ADVERTISED_TP;
10858                 else
10859                         mask |= ADVERTISED_FIBRE;
10860
10861                 if (cmd->advertising & ~mask)
10862                         return -EINVAL;
10863
10864                 mask &= (ADVERTISED_1000baseT_Half |
10865                          ADVERTISED_1000baseT_Full |
10866                          ADVERTISED_100baseT_Half |
10867                          ADVERTISED_100baseT_Full |
10868                          ADVERTISED_10baseT_Half |
10869                          ADVERTISED_10baseT_Full);
10870
10871                 cmd->advertising &= mask;
10872         } else {
10873                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10874                         if (speed != SPEED_1000)
10875                                 return -EINVAL;
10876
10877                         if (cmd->duplex != DUPLEX_FULL)
10878                                 return -EINVAL;
10879                 } else {
10880                         if (speed != SPEED_100 &&
10881                             speed != SPEED_10)
10882                                 return -EINVAL;
10883                 }
10884         }
10885
10886         tg3_full_lock(tp, 0);
10887
10888         tp->link_config.autoneg = cmd->autoneg;
10889         if (cmd->autoneg == AUTONEG_ENABLE) {
10890                 tp->link_config.advertising = (cmd->advertising |
10891                                               ADVERTISED_Autoneg);
10892                 tp->link_config.speed = SPEED_UNKNOWN;
10893                 tp->link_config.duplex = DUPLEX_UNKNOWN;
10894         } else {
10895                 tp->link_config.advertising = 0;
10896                 tp->link_config.speed = speed;
10897                 tp->link_config.duplex = cmd->duplex;
10898         }
10899
10900         if (netif_running(dev))
10901                 tg3_setup_phy(tp, 1);
10902
10903         tg3_full_unlock(tp);
10904
10905         return 0;
10906 }
10907
10908 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10909 {
10910         struct tg3 *tp = netdev_priv(dev);
10911
10912         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10913         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10914         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10915         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10916 }
10917
10918 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10919 {
10920         struct tg3 *tp = netdev_priv(dev);
10921
10922         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10923                 wol->supported = WAKE_MAGIC;
10924         else
10925                 wol->supported = 0;
10926         wol->wolopts = 0;
10927         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10928                 wol->wolopts = WAKE_MAGIC;
10929         memset(&wol->sopass, 0, sizeof(wol->sopass));
10930 }
10931
10932 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10933 {
10934         struct tg3 *tp = netdev_priv(dev);
10935         struct device *dp = &tp->pdev->dev;
10936
10937         if (wol->wolopts & ~WAKE_MAGIC)
10938                 return -EINVAL;
10939         if ((wol->wolopts & WAKE_MAGIC) &&
10940             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10941                 return -EINVAL;
10942
10943         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10944
10945         spin_lock_bh(&tp->lock);
10946         if (device_may_wakeup(dp))
10947                 tg3_flag_set(tp, WOL_ENABLE);
10948         else
10949                 tg3_flag_clear(tp, WOL_ENABLE);
10950         spin_unlock_bh(&tp->lock);
10951
10952         return 0;
10953 }
10954
10955 static u32 tg3_get_msglevel(struct net_device *dev)
10956 {
10957         struct tg3 *tp = netdev_priv(dev);
10958         return tp->msg_enable;
10959 }
10960
10961 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10962 {
10963         struct tg3 *tp = netdev_priv(dev);
10964         tp->msg_enable = value;
10965 }
10966
10967 static int tg3_nway_reset(struct net_device *dev)
10968 {
10969         struct tg3 *tp = netdev_priv(dev);
10970         int r;
10971
10972         if (!netif_running(dev))
10973                 return -EAGAIN;
10974
10975         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10976                 return -EINVAL;
10977
10978         if (tg3_flag(tp, USE_PHYLIB)) {
10979                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10980                         return -EAGAIN;
10981                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10982         } else {
10983                 u32 bmcr;
10984
10985                 spin_lock_bh(&tp->lock);
10986                 r = -EINVAL;
10987                 tg3_readphy(tp, MII_BMCR, &bmcr);
10988                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10989                     ((bmcr & BMCR_ANENABLE) ||
10990                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10991                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10992                                                    BMCR_ANENABLE);
10993                         r = 0;
10994                 }
10995                 spin_unlock_bh(&tp->lock);
10996         }
10997
10998         return r;
10999 }
11000
11001 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11002 {
11003         struct tg3 *tp = netdev_priv(dev);
11004
11005         ering->rx_max_pending = tp->rx_std_ring_mask;
11006         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11007                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11008         else
11009                 ering->rx_jumbo_max_pending = 0;
11010
11011         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11012
11013         ering->rx_pending = tp->rx_pending;
11014         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11015                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11016         else
11017                 ering->rx_jumbo_pending = 0;
11018
11019         ering->tx_pending = tp->napi[0].tx_pending;
11020 }
11021
11022 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11023 {
11024         struct tg3 *tp = netdev_priv(dev);
11025         int i, irq_sync = 0, err = 0;
11026
11027         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11028             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11029             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11030             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11031             (tg3_flag(tp, TSO_BUG) &&
11032              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11033                 return -EINVAL;
11034
11035         if (netif_running(dev)) {
11036                 tg3_phy_stop(tp);
11037                 tg3_netif_stop(tp);
11038                 irq_sync = 1;
11039         }
11040
11041         tg3_full_lock(tp, irq_sync);
11042
11043         tp->rx_pending = ering->rx_pending;
11044
11045         if (tg3_flag(tp, MAX_RXPEND_64) &&
11046             tp->rx_pending > 63)
11047                 tp->rx_pending = 63;
11048         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11049
11050         for (i = 0; i < tp->irq_max; i++)
11051                 tp->napi[i].tx_pending = ering->tx_pending;
11052
11053         if (netif_running(dev)) {
11054                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11055                 err = tg3_restart_hw(tp, 1);
11056                 if (!err)
11057                         tg3_netif_start(tp);
11058         }
11059
11060         tg3_full_unlock(tp);
11061
11062         if (irq_sync && !err)
11063                 tg3_phy_start(tp);
11064
11065         return err;
11066 }
11067
11068 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11069 {
11070         struct tg3 *tp = netdev_priv(dev);
11071
11072         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11073
11074         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11075                 epause->rx_pause = 1;
11076         else
11077                 epause->rx_pause = 0;
11078
11079         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11080                 epause->tx_pause = 1;
11081         else
11082                 epause->tx_pause = 0;
11083 }
11084
11085 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11086 {
11087         struct tg3 *tp = netdev_priv(dev);
11088         int err = 0;
11089
11090         if (tg3_flag(tp, USE_PHYLIB)) {
11091                 u32 newadv;
11092                 struct phy_device *phydev;
11093
11094                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11095
11096                 if (!(phydev->supported & SUPPORTED_Pause) ||
11097                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11098                      (epause->rx_pause != epause->tx_pause)))
11099                         return -EINVAL;
11100
11101                 tp->link_config.flowctrl = 0;
11102                 if (epause->rx_pause) {
11103                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11104
11105                         if (epause->tx_pause) {
11106                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11107                                 newadv = ADVERTISED_Pause;
11108                         } else
11109                                 newadv = ADVERTISED_Pause |
11110                                          ADVERTISED_Asym_Pause;
11111                 } else if (epause->tx_pause) {
11112                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11113                         newadv = ADVERTISED_Asym_Pause;
11114                 } else
11115                         newadv = 0;
11116
11117                 if (epause->autoneg)
11118                         tg3_flag_set(tp, PAUSE_AUTONEG);
11119                 else
11120                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11121
11122                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11123                         u32 oldadv = phydev->advertising &
11124                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11125                         if (oldadv != newadv) {
11126                                 phydev->advertising &=
11127                                         ~(ADVERTISED_Pause |
11128                                           ADVERTISED_Asym_Pause);
11129                                 phydev->advertising |= newadv;
11130                                 if (phydev->autoneg) {
11131                                         /*
11132                                          * Always renegotiate the link to
11133                                          * inform our link partner of our
11134                                          * flow control settings, even if the
11135                                          * flow control is forced.  Let
11136                                          * tg3_adjust_link() do the final
11137                                          * flow control setup.
11138                                          */
11139                                         return phy_start_aneg(phydev);
11140                                 }
11141                         }
11142
11143                         if (!epause->autoneg)
11144                                 tg3_setup_flow_control(tp, 0, 0);
11145                 } else {
11146                         tp->link_config.advertising &=
11147                                         ~(ADVERTISED_Pause |
11148                                           ADVERTISED_Asym_Pause);
11149                         tp->link_config.advertising |= newadv;
11150                 }
11151         } else {
11152                 int irq_sync = 0;
11153
11154                 if (netif_running(dev)) {
11155                         tg3_netif_stop(tp);
11156                         irq_sync = 1;
11157                 }
11158
11159                 tg3_full_lock(tp, irq_sync);
11160
11161                 if (epause->autoneg)
11162                         tg3_flag_set(tp, PAUSE_AUTONEG);
11163                 else
11164                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11165                 if (epause->rx_pause)
11166                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11167                 else
11168                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11169                 if (epause->tx_pause)
11170                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11171                 else
11172                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11173
11174                 if (netif_running(dev)) {
11175                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11176                         err = tg3_restart_hw(tp, 1);
11177                         if (!err)
11178                                 tg3_netif_start(tp);
11179                 }
11180
11181                 tg3_full_unlock(tp);
11182         }
11183
11184         return err;
11185 }
11186
11187 static int tg3_get_sset_count(struct net_device *dev, int sset)
11188 {
11189         switch (sset) {
11190         case ETH_SS_TEST:
11191                 return TG3_NUM_TEST;
11192         case ETH_SS_STATS:
11193                 return TG3_NUM_STATS;
11194         default:
11195                 return -EOPNOTSUPP;
11196         }
11197 }
11198
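/* ethtool ->get_rxnfc(): only ETHTOOL_GRXRINGS is supported, reporting
 * how many rx queues are available for RSS.  Vector 0 handles only link
 * interrupts, so it is excluded from the count.
 */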
11199 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11200                          u32 *rules __always_unused)
11201 {
11202         struct tg3 *tp = netdev_priv(dev);
11203
11204         if (!tg3_flag(tp, SUPPORT_MSIX))
11205                 return -EOPNOTSUPP;
11206
11207         switch (info->cmd) {
11208         case ETHTOOL_GRXRINGS:
11209                 if (netif_running(tp->dev))
11210                         info->data = tp->irq_cnt;
11211                 else {
11212                         info->data = num_online_cpus();
11213                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
11214                                 info->data = TG3_IRQ_MAX_VECS_RSS;
11215                 }
11216
11217                 /* The first interrupt vector only
11218                  * handles link interrupts.
11219                  */
11220                 info->data -= 1;
11221                 return 0;
11222
11223         default:
11224                 return -EOPNOTSUPP;
11225         }
11226 }
11227
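/* Report the RSS indirection table size to ethtool.  Returning zero on
 * parts without MSI-X marks the table as unavailable.
 */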
11228 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11229 {
11230         u32 size = 0;
11231         struct tg3 *tp = netdev_priv(dev);
11232
11233         if (tg3_flag(tp, SUPPORT_MSIX))
11234                 size = TG3_RSS_INDIR_TBL_SIZE;
11235
11236         return size;
11237 }
11238
11239 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11240 {
11241         struct tg3 *tp = netdev_priv(dev);
11242         int i;
11243
11244         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11245                 indir[i] = tp->rss_ind_tbl[i];
11246
11247         return 0;
11248 }
11249
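/* ethtool ->set_rxfh_indir(): cache the new RSS indirection table and,
 * if the interface is up with RSS enabled, program it into the MAC.
 */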
11250 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11251 {
11252         struct tg3 *tp = netdev_priv(dev);
11253         size_t i;
11254
11255         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11256                 tp->rss_ind_tbl[i] = indir[i];
11257
11258         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11259                 return 0;
11260
11261         /* It is legal to write the indirection
11262          * table while the device is running.
11263          */
11264         tg3_full_lock(tp, 0);
11265         tg3_rss_write_indir_tbl(tp);
11266         tg3_full_unlock(tp);
11267
11268         return 0;
11269 }
11270
11271 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11272 {
11273         switch (stringset) {
11274         case ETH_SS_STATS:
11275                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11276                 break;
11277         case ETH_SS_TEST:
11278                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11279                 break;
11280         default:
11281                 WARN_ON(1);     /* unknown stringset; should be a WARN() with a message */
11282                 break;
11283         }
11284 }
11285
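/* ethtool ->set_phys_id(): blink the port LED so the user can identify
 * the adapter (e.g. "ethtool -p ethX").  Returning 1 for
 * ETHTOOL_ID_ACTIVE asks the ethtool core to alternate the ON and OFF
 * states once per second.
 */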
11286 static int tg3_set_phys_id(struct net_device *dev,
11287                             enum ethtool_phys_id_state state)
11288 {
11289         struct tg3 *tp = netdev_priv(dev);
11290
11291         if (!netif_running(tp->dev))
11292                 return -EAGAIN;
11293
11294         switch (state) {
11295         case ETHTOOL_ID_ACTIVE:
11296                 return 1;       /* cycle on/off once per second */
11297
11298         case ETHTOOL_ID_ON:
11299                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11300                      LED_CTRL_1000MBPS_ON |
11301                      LED_CTRL_100MBPS_ON |
11302                      LED_CTRL_10MBPS_ON |
11303                      LED_CTRL_TRAFFIC_OVERRIDE |
11304                      LED_CTRL_TRAFFIC_BLINK |
11305                      LED_CTRL_TRAFFIC_LED);
11306                 break;
11307
11308         case ETHTOOL_ID_OFF:
11309                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11310                      LED_CTRL_TRAFFIC_OVERRIDE);
11311                 break;
11312
11313         case ETHTOOL_ID_INACTIVE:
11314                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11315                 break;
11316         }
11317
11318         return 0;
11319 }
11320
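/* ethtool ->get_ethtool_stats(): copy out the driver-maintained
 * statistics, or zeros if the chip has never been brought up.
 */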
11321 static void tg3_get_ethtool_stats(struct net_device *dev,
11322                                    struct ethtool_stats *estats, u64 *tmp_stats)
11323 {
11324         struct tg3 *tp = netdev_priv(dev);
11325
11326         if (tp->hw_stats)
11327                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11328         else
11329                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11330 }
11331
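/* Read the adapter's Vital Product Data into a kmalloc()ed buffer (the
 * caller frees it) and return its length in *vpdlen.  Standard images
 * are scanned for an extended-VPD directory entry first; otherwise the
 * default NVRAM offset is used, or, for non-standard images, the data
 * is fetched through the PCI VPD capability.  Returns NULL on failure.
 */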
11332 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11333 {
11334         int i;
11335         __be32 *buf;
11336         u32 offset = 0, len = 0;
11337         u32 magic, val;
11338
11339         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11340                 return NULL;
11341
11342         if (magic == TG3_EEPROM_MAGIC) {
11343                 for (offset = TG3_NVM_DIR_START;
11344                      offset < TG3_NVM_DIR_END;
11345                      offset += TG3_NVM_DIRENT_SIZE) {
11346                         if (tg3_nvram_read(tp, offset, &val))
11347                                 return NULL;
11348
11349                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11350                             TG3_NVM_DIRTYPE_EXTVPD)
11351                                 break;
11352                 }
11353
11354                 if (offset != TG3_NVM_DIR_END) {
11355                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11356                         if (tg3_nvram_read(tp, offset + 4, &offset))
11357                                 return NULL;
11358
11359                         offset = tg3_nvram_logical_addr(tp, offset);
11360                 }
11361         }
11362
11363         if (!offset || !len) {
11364                 offset = TG3_NVM_VPD_OFF;
11365                 len = TG3_NVM_VPD_LEN;
11366         }
11367
11368         buf = kmalloc(len, GFP_KERNEL);
11369         if (buf == NULL)
11370                 return NULL;
11371
11372         if (magic == TG3_EEPROM_MAGIC) {
11373                 for (i = 0; i < len; i += 4) {
11374                         /* The data is in little-endian format in NVRAM.
11375                          * Use the big-endian read routines to preserve
11376                          * the byte order as it exists in NVRAM.
11377                          */
11378                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11379                                 goto error;
11380                 }
11381         } else {
11382                 u8 *ptr;
11383                 ssize_t cnt;
11384                 unsigned int pos = 0;
11385
11386                 ptr = (u8 *)&buf[0];
11387                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11388                         cnt = pci_read_vpd(tp->pdev, pos,
11389                                            len - pos, ptr);
11390                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11391                                 cnt = 0;
11392                         else if (cnt < 0)
11393                                 goto error;
11394                 }
11395                 if (pos != len)
11396                         goto error;
11397         }
11398
11399         *vpdlen = len;
11400
11401         return buf;
11402
11403 error:
11404         kfree(buf);
11405         return NULL;
11406 }
11407
11408 #define NVRAM_TEST_SIZE 0x100
11409 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11410 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11411 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11412 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11413 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11414 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11415 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11416 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11417
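/* NVRAM self-test: read the image back and verify it with the integrity
 * scheme matching its format - a zero byte-sum for selfboot firmware
 * images, odd parity for the hardware selfboot format, and CRC checks
 * plus the VPD checksum keyword for standard EEPROM images.
 */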
11418 static int tg3_test_nvram(struct tg3 *tp)
11419 {
11420         u32 csum, magic, len;
11421         __be32 *buf;
11422         int i, j, k, err = 0, size;
11423
11424         if (tg3_flag(tp, NO_NVRAM))
11425                 return 0;
11426
11427         if (tg3_nvram_read(tp, 0, &magic) != 0)
11428                 return -EIO;
11429
11430         if (magic == TG3_EEPROM_MAGIC)
11431                 size = NVRAM_TEST_SIZE;
11432         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11433                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11434                     TG3_EEPROM_SB_FORMAT_1) {
11435                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11436                         case TG3_EEPROM_SB_REVISION_0:
11437                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11438                                 break;
11439                         case TG3_EEPROM_SB_REVISION_2:
11440                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11441                                 break;
11442                         case TG3_EEPROM_SB_REVISION_3:
11443                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11444                                 break;
11445                         case TG3_EEPROM_SB_REVISION_4:
11446                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11447                                 break;
11448                         case TG3_EEPROM_SB_REVISION_5:
11449                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11450                                 break;
11451                         case TG3_EEPROM_SB_REVISION_6:
11452                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11453                                 break;
11454                         default:
11455                                 return -EIO;
11456                         }
11457                 } else
11458                         return 0;
11459         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11460                 size = NVRAM_SELFBOOT_HW_SIZE;
11461         else
11462                 return -EIO;
11463
11464         buf = kmalloc(size, GFP_KERNEL);
11465         if (buf == NULL)
11466                 return -ENOMEM;
11467
11468         err = -EIO;
11469         for (i = 0, j = 0; i < size; i += 4, j++) {
11470                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11471                 if (err)
11472                         break;
11473         }
11474         if (i < size)
11475                 goto out;
11476
11477         /* Selfboot format */
11478         magic = be32_to_cpu(buf[0]);
11479         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11480             TG3_EEPROM_MAGIC_FW) {
11481                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11482
11483                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11484                     TG3_EEPROM_SB_REVISION_2) {
11485                         /* For rev 2, the csum doesn't include the 4-byte MBA (Multiple Boot Agent) field. */
11486                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11487                                 csum8 += buf8[i];
11488                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11489                                 csum8 += buf8[i];
11490                 } else {
11491                         for (i = 0; i < size; i++)
11492                                 csum8 += buf8[i];
11493                 }
11494
11495                 if (csum8 == 0) {
11496                         err = 0;
11497                         goto out;
11498                 }
11499
11500                 err = -EIO;
11501                 goto out;
11502         }
11503
11504         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11505             TG3_EEPROM_MAGIC_HW) {
11506                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11507                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11508                 u8 *buf8 = (u8 *) buf;
11509
11510                 /* Separate the parity bits and the data bytes.  */
11511                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11512                         if ((i == 0) || (i == 8)) {
11513                                 int l;
11514                                 u8 msk;
11515
11516                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11517                                         parity[k++] = buf8[i] & msk;
11518                                 i++;
11519                         } else if (i == 16) {
11520                                 int l;
11521                                 u8 msk;
11522
11523                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11524                                         parity[k++] = buf8[i] & msk;
11525                                 i++;
11526
11527                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11528                                         parity[k++] = buf8[i] & msk;
11529                                 i++;
11530                         }
11531                         data[j++] = buf8[i];
11532                 }
11533
11534                 err = -EIO;
11535                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11536                         u8 hw8 = hweight8(data[i]);
11537
11538                         if ((hw8 & 0x1) && parity[i])
11539                                 goto out;
11540                         else if (!(hw8 & 0x1) && !parity[i])
11541                                 goto out;
11542                 }
11543                 err = 0;
11544                 goto out;
11545         }
11546
11547         err = -EIO;
11548
11549         /* Bootstrap checksum at offset 0x10 */
11550         csum = calc_crc((unsigned char *) buf, 0x10);
11551         if (csum != le32_to_cpu(buf[0x10/4]))
11552                 goto out;
11553
11554         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11555         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11556         if (csum != le32_to_cpu(buf[0xfc/4]))
11557                 goto out;
11558
11559         kfree(buf);
11560
11561         buf = tg3_vpd_readblock(tp, &len);
11562         if (!buf)
11563                 return -ENOMEM;
11564
11565         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11566         if (i > 0) {
11567                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11568                 if (j < 0)
11569                         goto out;
11570
11571                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11572                         goto out;
11573
11574                 i += PCI_VPD_LRDT_TAG_SIZE;
11575                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11576                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11577                 if (j > 0) {
11578                         u8 csum8 = 0;
11579
11580                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11581
11582                         for (i = 0; i <= j; i++)
11583                                 csum8 += ((u8 *)buf)[i];
11584
11585                         if (csum8)
11586                                 goto out;
11587                 }
11588         }
11589
11590         err = 0;
11591
11592 out:
11593         kfree(buf);
11594         return err;
11595 }
11596
11597 #define TG3_SERDES_TIMEOUT_SEC  2
11598 #define TG3_COPPER_TIMEOUT_SEC  6
11599
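/* Link self-test: poll the carrier state once a second until the link
 * comes up or the (serdes vs. copper) timeout expires.
 */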
11600 static int tg3_test_link(struct tg3 *tp)
11601 {
11602         int i, max;
11603
11604         if (!netif_running(tp->dev))
11605                 return -ENODEV;
11606
11607         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11608                 max = TG3_SERDES_TIMEOUT_SEC;
11609         else
11610                 max = TG3_COPPER_TIMEOUT_SEC;
11611
11612         for (i = 0; i < max; i++) {
11613                 if (netif_carrier_ok(tp->dev))
11614                         return 0;
11615
11616                 if (msleep_interruptible(1000))
11617                         break;
11618         }
11619
11620         return -EIO;
11621 }
11622
11623 /* Only test the commonly used registers */
11624 static int tg3_test_registers(struct tg3 *tp)
11625 {
11626         int i, is_5705, is_5750;
11627         u32 offset, read_mask, write_mask, val, save_val, read_val;
11628         static struct {
11629                 u16 offset;
11630                 u16 flags;
11631 #define TG3_FL_5705     0x1
11632 #define TG3_FL_NOT_5705 0x2
11633 #define TG3_FL_NOT_5788 0x4
11634 #define TG3_FL_NOT_5750 0x8
11635                 u32 read_mask;
11636                 u32 write_mask;
11637         } reg_tbl[] = {
11638                 /* MAC Control Registers */
11639                 { MAC_MODE, TG3_FL_NOT_5705,
11640                         0x00000000, 0x00ef6f8c },
11641                 { MAC_MODE, TG3_FL_5705,
11642                         0x00000000, 0x01ef6b8c },
11643                 { MAC_STATUS, TG3_FL_NOT_5705,
11644                         0x03800107, 0x00000000 },
11645                 { MAC_STATUS, TG3_FL_5705,
11646                         0x03800100, 0x00000000 },
11647                 { MAC_ADDR_0_HIGH, 0x0000,
11648                         0x00000000, 0x0000ffff },
11649                 { MAC_ADDR_0_LOW, 0x0000,
11650                         0x00000000, 0xffffffff },
11651                 { MAC_RX_MTU_SIZE, 0x0000,
11652                         0x00000000, 0x0000ffff },
11653                 { MAC_TX_MODE, 0x0000,
11654                         0x00000000, 0x00000070 },
11655                 { MAC_TX_LENGTHS, 0x0000,
11656                         0x00000000, 0x00003fff },
11657                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11658                         0x00000000, 0x000007fc },
11659                 { MAC_RX_MODE, TG3_FL_5705,
11660                         0x00000000, 0x000007dc },
11661                 { MAC_HASH_REG_0, 0x0000,
11662                         0x00000000, 0xffffffff },
11663                 { MAC_HASH_REG_1, 0x0000,
11664                         0x00000000, 0xffffffff },
11665                 { MAC_HASH_REG_2, 0x0000,
11666                         0x00000000, 0xffffffff },
11667                 { MAC_HASH_REG_3, 0x0000,
11668                         0x00000000, 0xffffffff },
11669
11670                 /* Receive Data and Receive BD Initiator Control Registers. */
11671                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11672                         0x00000000, 0xffffffff },
11673                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11674                         0x00000000, 0xffffffff },
11675                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11676                         0x00000000, 0x00000003 },
11677                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11678                         0x00000000, 0xffffffff },
11679                 { RCVDBDI_STD_BD+0, 0x0000,
11680                         0x00000000, 0xffffffff },
11681                 { RCVDBDI_STD_BD+4, 0x0000,
11682                         0x00000000, 0xffffffff },
11683                 { RCVDBDI_STD_BD+8, 0x0000,
11684                         0x00000000, 0xffff0002 },
11685                 { RCVDBDI_STD_BD+0xc, 0x0000,
11686                         0x00000000, 0xffffffff },
11687
11688                 /* Receive BD Initiator Control Registers. */
11689                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11690                         0x00000000, 0xffffffff },
11691                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11692                         0x00000000, 0x000003ff },
11693                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11694                         0x00000000, 0xffffffff },
11695
11696                 /* Host Coalescing Control Registers. */
11697                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11698                         0x00000000, 0x00000004 },
11699                 { HOSTCC_MODE, TG3_FL_5705,
11700                         0x00000000, 0x000000f6 },
11701                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11702                         0x00000000, 0xffffffff },
11703                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11704                         0x00000000, 0x000003ff },
11705                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11706                         0x00000000, 0xffffffff },
11707                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11708                         0x00000000, 0x000003ff },
11709                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11710                         0x00000000, 0xffffffff },
11711                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11712                         0x00000000, 0x000000ff },
11713                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11714                         0x00000000, 0xffffffff },
11715                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11716                         0x00000000, 0x000000ff },
11717                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11718                         0x00000000, 0xffffffff },
11719                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11720                         0x00000000, 0xffffffff },
11721                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11722                         0x00000000, 0xffffffff },
11723                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11724                         0x00000000, 0x000000ff },
11725                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11726                         0x00000000, 0xffffffff },
11727                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11728                         0x00000000, 0x000000ff },
11729                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11730                         0x00000000, 0xffffffff },
11731                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11732                         0x00000000, 0xffffffff },
11733                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11734                         0x00000000, 0xffffffff },
11735                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11736                         0x00000000, 0xffffffff },
11737                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11738                         0x00000000, 0xffffffff },
11739                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11740                         0xffffffff, 0x00000000 },
11741                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11742                         0xffffffff, 0x00000000 },
11743
11744                 /* Buffer Manager Control Registers. */
11745                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11746                         0x00000000, 0x007fff80 },
11747                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11748                         0x00000000, 0x007fffff },
11749                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11750                         0x00000000, 0x0000003f },
11751                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11752                         0x00000000, 0x000001ff },
11753                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11754                         0x00000000, 0x000001ff },
11755                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11756                         0xffffffff, 0x00000000 },
11757                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11758                         0xffffffff, 0x00000000 },
11759
11760                 /* Mailbox Registers */
11761                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11762                         0x00000000, 0x000001ff },
11763                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11764                         0x00000000, 0x000001ff },
11765                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11766                         0x00000000, 0x000007ff },
11767                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11768                         0x00000000, 0x000001ff },
11769
11770                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11771         };
11772
11773         is_5705 = is_5750 = 0;
11774         if (tg3_flag(tp, 5705_PLUS)) {
11775                 is_5705 = 1;
11776                 if (tg3_flag(tp, 5750_PLUS))
11777                         is_5750 = 1;
11778         }
11779
11780         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11781                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11782                         continue;
11783
11784                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11785                         continue;
11786
11787                 if (tg3_flag(tp, IS_5788) &&
11788                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11789                         continue;
11790
11791                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11792                         continue;
11793
11794                 offset = (u32) reg_tbl[i].offset;
11795                 read_mask = reg_tbl[i].read_mask;
11796                 write_mask = reg_tbl[i].write_mask;
11797
11798                 /* Save the original register content */
11799                 save_val = tr32(offset);
11800
11801                 /* Determine the read-only value. */
11802                 read_val = save_val & read_mask;
11803
11804                 /* Write zero to the register, then make sure the read-only bits
11805                  * are not changed and the read/write bits are all zeros.
11806                  */
11807                 tw32(offset, 0);
11808
11809                 val = tr32(offset);
11810
11811                 /* Test the read-only and read/write bits. */
11812                 if (((val & read_mask) != read_val) || (val & write_mask))
11813                         goto out;
11814
11815                 /* Write ones to all the bits defined by RdMask and WrMask, then
11816                  * make sure the read-only bits are not changed and the
11817                  * read/write bits are all ones.
11818                  */
11819                 tw32(offset, read_mask | write_mask);
11820
11821                 val = tr32(offset);
11822
11823                 /* Test the read-only bits. */
11824                 if ((val & read_mask) != read_val)
11825                         goto out;
11826
11827                 /* Test the read/write bits. */
11828                 if ((val & write_mask) != write_mask)
11829                         goto out;
11830
11831                 tw32(offset, save_val);
11832         }
11833
11834         return 0;
11835
11836 out:
11837         if (netif_msg_hw(tp))
11838                 netdev_err(tp->dev,
11839                            "Register test failed at offset %x\n", offset);
11840         tw32(offset, save_val);
11841         return -EIO;
11842 }
11843
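/* Write each of the three test patterns across the given region of
 * NIC-local memory and read it back, failing on the first mismatch.
 */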
11844 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11845 {
11846         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11847         int i;
11848         u32 j;
11849
11850         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11851                 for (j = 0; j < len; j += 4) {
11852                         u32 val;
11853
11854                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11855                         tg3_read_mem(tp, offset + j, &val);
11856                         if (val != test_pattern[i])
11857                                 return -EIO;
11858                 }
11859         }
11860         return 0;
11861 }
11862
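/* Memory self-test: select the offset/length table matching the ASIC
 * generation and pattern-test each internal SRAM region in it.
 */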
11863 static int tg3_test_memory(struct tg3 *tp)
11864 {
11865         static struct mem_entry {
11866                 u32 offset;
11867                 u32 len;
11868         } mem_tbl_570x[] = {
11869                 { 0x00000000, 0x00b50},
11870                 { 0x00002000, 0x1c000},
11871                 { 0xffffffff, 0x00000}
11872         }, mem_tbl_5705[] = {
11873                 { 0x00000100, 0x0000c},
11874                 { 0x00000200, 0x00008},
11875                 { 0x00004000, 0x00800},
11876                 { 0x00006000, 0x01000},
11877                 { 0x00008000, 0x02000},
11878                 { 0x00010000, 0x0e000},
11879                 { 0xffffffff, 0x00000}
11880         }, mem_tbl_5755[] = {
11881                 { 0x00000200, 0x00008},
11882                 { 0x00004000, 0x00800},
11883                 { 0x00006000, 0x00800},
11884                 { 0x00008000, 0x02000},
11885                 { 0x00010000, 0x0c000},
11886                 { 0xffffffff, 0x00000}
11887         }, mem_tbl_5906[] = {
11888                 { 0x00000200, 0x00008},
11889                 { 0x00004000, 0x00400},
11890                 { 0x00006000, 0x00400},
11891                 { 0x00008000, 0x01000},
11892                 { 0x00010000, 0x01000},
11893                 { 0xffffffff, 0x00000}
11894         }, mem_tbl_5717[] = {
11895                 { 0x00000200, 0x00008},
11896                 { 0x00010000, 0x0a000},
11897                 { 0x00020000, 0x13c00},
11898                 { 0xffffffff, 0x00000}
11899         }, mem_tbl_57765[] = {
11900                 { 0x00000200, 0x00008},
11901                 { 0x00004000, 0x00800},
11902                 { 0x00006000, 0x09800},
11903                 { 0x00010000, 0x0a000},
11904                 { 0xffffffff, 0x00000}
11905         };
11906         struct mem_entry *mem_tbl;
11907         int err = 0;
11908         int i;
11909
11910         if (tg3_flag(tp, 5717_PLUS))
11911                 mem_tbl = mem_tbl_5717;
11912         else if (tg3_flag(tp, 57765_CLASS))
11913                 mem_tbl = mem_tbl_57765;
11914         else if (tg3_flag(tp, 5755_PLUS))
11915                 mem_tbl = mem_tbl_5755;
11916         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11917                 mem_tbl = mem_tbl_5906;
11918         else if (tg3_flag(tp, 5705_PLUS))
11919                 mem_tbl = mem_tbl_5705;
11920         else
11921                 mem_tbl = mem_tbl_570x;
11922
11923         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11924                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11925                 if (err)
11926                         break;
11927         }
11928
11929         return err;
11930 }
11931
11932 #define TG3_TSO_MSS             500
11933
11934 #define TG3_TSO_IP_HDR_LEN      20
11935 #define TG3_TSO_TCP_HDR_LEN     20
11936 #define TG3_TSO_TCP_OPT_LEN     12
11937
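/* Canned ethertype + IPv4 + TCP header (including a 12-byte timestamp
 * option) used as the template frame for the TSO loopback test.
 */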
11938 static const u8 tg3_tso_header[] = {
11939 0x08, 0x00,
11940 0x45, 0x00, 0x00, 0x00,
11941 0x00, 0x00, 0x40, 0x00,
11942 0x40, 0x06, 0x00, 0x00,
11943 0x0a, 0x00, 0x00, 0x01,
11944 0x0a, 0x00, 0x00, 0x02,
11945 0x0d, 0x00, 0xe0, 0x00,
11946 0x00, 0x00, 0x01, 0x00,
11947 0x00, 0x00, 0x02, 0x00,
11948 0x80, 0x10, 0x10, 0x00,
11949 0x14, 0x09, 0x00, 0x00,
11950 0x01, 0x01, 0x08, 0x0a,
11951 0x11, 0x11, 0x11, 0x11,
11952 0x11, 0x11, 0x11, 0x11,
11953 };
11954
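/* Run a single loopback test: build a pktsz-byte test frame (shaped as
 * a TSO super-packet when tso_loopback is set), post it to the tx ring,
 * force an interrupt coalescing update, poll the status block until the
 * packet(s) complete, and verify the payload byte-for-byte on rx.
 */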
11955 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11956 {
11957         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11958         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11959         u32 budget;
11960         struct sk_buff *skb;
11961         u8 *tx_data, *rx_data;
11962         dma_addr_t map;
11963         int num_pkts, tx_len, rx_len, i, err;
11964         struct tg3_rx_buffer_desc *desc;
11965         struct tg3_napi *tnapi, *rnapi;
11966         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11967
11968         tnapi = &tp->napi[0];
11969         rnapi = &tp->napi[0];
11970         if (tp->irq_cnt > 1) {
11971                 if (tg3_flag(tp, ENABLE_RSS))
11972                         rnapi = &tp->napi[1];
11973                 if (tg3_flag(tp, ENABLE_TSS))
11974                         tnapi = &tp->napi[1];
11975         }
11976         coal_now = tnapi->coal_now | rnapi->coal_now;
11977
11978         err = -EIO;
11979
11980         tx_len = pktsz;
11981         skb = netdev_alloc_skb(tp->dev, tx_len);
11982         if (!skb)
11983                 return -ENOMEM;
11984
11985         tx_data = skb_put(skb, tx_len);
11986         memcpy(tx_data, tp->dev->dev_addr, 6);
11987         memset(tx_data + 6, 0x0, 8);
11988
11989         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11990
11991         if (tso_loopback) {
11992                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11993
11994                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11995                               TG3_TSO_TCP_OPT_LEN;
11996
11997                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11998                        sizeof(tg3_tso_header));
11999                 mss = TG3_TSO_MSS;
12000
12001                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12002                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12003
12004                 /* Set the total length field in the IP header */
12005                 iph->tot_len = htons((u16)(mss + hdr_len));
12006
12007                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12008                               TXD_FLAG_CPU_POST_DMA);
12009
12010                 if (tg3_flag(tp, HW_TSO_1) ||
12011                     tg3_flag(tp, HW_TSO_2) ||
12012                     tg3_flag(tp, HW_TSO_3)) {
12013                         struct tcphdr *th;
12014                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12015                         th = (struct tcphdr *)&tx_data[val];
12016                         th->check = 0;
12017                 } else
12018                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12019
12020                 if (tg3_flag(tp, HW_TSO_3)) {
12021                         mss |= (hdr_len & 0xc) << 12;
12022                         if (hdr_len & 0x10)
12023                                 base_flags |= 0x00000010;
12024                         base_flags |= (hdr_len & 0x3e0) << 5;
12025                 } else if (tg3_flag(tp, HW_TSO_2))
12026                         mss |= hdr_len << 9;
12027                 else if (tg3_flag(tp, HW_TSO_1) ||
12028                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12029                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12030                 } else {
12031                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12032                 }
12033
12034                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12035         } else {
12036                 num_pkts = 1;
12037                 data_off = ETH_HLEN;
12038
12039                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12040                     tx_len > VLAN_ETH_FRAME_LEN)
12041                         base_flags |= TXD_FLAG_JMB_PKT;
12042         }
12043
12044         for (i = data_off; i < tx_len; i++)
12045                 tx_data[i] = (u8) (i & 0xff);
12046
12047         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12048         if (pci_dma_mapping_error(tp->pdev, map)) {
12049                 dev_kfree_skb(skb);
12050                 return -EIO;
12051         }
12052
12053         val = tnapi->tx_prod;
12054         tnapi->tx_buffers[val].skb = skb;
12055         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12056
12057         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12058                rnapi->coal_now);
12059
12060         udelay(10);
12061
12062         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12063
12064         budget = tg3_tx_avail(tnapi);
12065         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12066                             base_flags | TXD_FLAG_END, mss, 0)) {
12067                 tnapi->tx_buffers[val].skb = NULL;
12068                 dev_kfree_skb(skb);
12069                 return -EIO;
12070         }
12071
12072         tnapi->tx_prod++;
12073
12074         /* Sync BD data before updating mailbox */
12075         wmb();
12076
12077         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12078         tr32_mailbox(tnapi->prodmbox);
12079
12080         udelay(10);
12081
12082         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12083         for (i = 0; i < 35; i++) {
12084                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12085                        coal_now);
12086
12087                 udelay(10);
12088
12089                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12090                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12091                 if ((tx_idx == tnapi->tx_prod) &&
12092                     (rx_idx == (rx_start_idx + num_pkts)))
12093                         break;
12094         }
12095
12096         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12097         dev_kfree_skb(skb);
12098
12099         if (tx_idx != tnapi->tx_prod)
12100                 goto out;
12101
12102         if (rx_idx != rx_start_idx + num_pkts)
12103                 goto out;
12104
12105         val = data_off;
12106         while (rx_idx != rx_start_idx) {
12107                 desc = &rnapi->rx_rcb[rx_start_idx++];
12108                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12109                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12110
12111                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12112                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12113                         goto out;
12114
12115                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12116                          - ETH_FCS_LEN;
12117
12118                 if (!tso_loopback) {
12119                         if (rx_len != tx_len)
12120                                 goto out;
12121
12122                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12123                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12124                                         goto out;
12125                         } else {
12126                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12127                                         goto out;
12128                         }
12129                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12130                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12131                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12132                         goto out;
12133                 }
12134
12135                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12136                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12137                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12138                                              mapping);
12139                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12140                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12141                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12142                                              mapping);
12143                 } else
12144                         goto out;
12145
12146                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12147                                             PCI_DMA_FROMDEVICE);
12148
12149                 rx_data += TG3_RX_OFFSET(tp);
12150                 for (i = data_off; i < rx_len; i++, val++) {
12151                         if (*(rx_data + i) != (u8) (val & 0xff))
12152                                 goto out;
12153                 }
12154         }
12155
12156         err = 0;
12157
12158         /* tg3_free_rings will unmap and free the rx_data */
12159 out:
12160         return err;
12161 }
12162
12163 #define TG3_STD_LOOPBACK_FAILED         1
12164 #define TG3_JMB_LOOPBACK_FAILED         2
12165 #define TG3_TSO_LOOPBACK_FAILED         4
12166 #define TG3_LOOPBACK_FAILED \
12167         (TG3_STD_LOOPBACK_FAILED | \
12168          TG3_JMB_LOOPBACK_FAILED | \
12169          TG3_TSO_LOOPBACK_FAILED)
12170
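/* Run the standard, jumbo and TSO packet tests in MAC-, PHY- and
 * (optionally) external-loopback modes, accumulating the per-mode
 * failure bits in data[0..2].
 */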
12171 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12172 {
12173         int err = -EIO;
12174         u32 eee_cap;
12175         u32 jmb_pkt_sz = 9000;
12176
12177         if (tp->dma_limit)
12178                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12179
12180         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12181         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12182
12183         if (!netif_running(tp->dev)) {
12184                 data[0] = TG3_LOOPBACK_FAILED;
12185                 data[1] = TG3_LOOPBACK_FAILED;
12186                 if (do_extlpbk)
12187                         data[2] = TG3_LOOPBACK_FAILED;
12188                 goto done;
12189         }
12190
12191         err = tg3_reset_hw(tp, 1);
12192         if (err) {
12193                 data[0] = TG3_LOOPBACK_FAILED;
12194                 data[1] = TG3_LOOPBACK_FAILED;
12195                 if (do_extlpbk)
12196                         data[2] = TG3_LOOPBACK_FAILED;
12197                 goto done;
12198         }
12199
12200         if (tg3_flag(tp, ENABLE_RSS)) {
12201                 int i;
12202
12203                 /* Reroute all rx packets to the 1st queue */
12204                 for (i = MAC_RSS_INDIR_TBL_0;
12205                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12206                         tw32(i, 0x0);
12207         }
12208
12209         /* HW errata - mac loopback fails in some cases on 5780.
12210          * Normal traffic and PHY loopback are not affected by
12211          * errata.  Also, the MAC loopback test is deprecated for
12212          * all newer ASIC revisions.
12213          */
12214         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12215             !tg3_flag(tp, CPMU_PRESENT)) {
12216                 tg3_mac_loopback(tp, true);
12217
12218                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12219                         data[0] |= TG3_STD_LOOPBACK_FAILED;
12220
12221                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12222                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12223                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
12224
12225                 tg3_mac_loopback(tp, false);
12226         }
12227
12228         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12229             !tg3_flag(tp, USE_PHYLIB)) {
12230                 int i;
12231
12232                 tg3_phy_lpbk_set(tp, 0, false);
12233
12234                 /* Wait for link */
12235                 for (i = 0; i < 100; i++) {
12236                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12237                                 break;
12238                         mdelay(1);
12239                 }
12240
12241                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12242                         data[1] |= TG3_STD_LOOPBACK_FAILED;
12243                 if (tg3_flag(tp, TSO_CAPABLE) &&
12244                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12245                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
12246                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12247                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12248                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
12249
12250                 if (do_extlpbk) {
12251                         tg3_phy_lpbk_set(tp, 0, true);
12252
12253                         /* All link indications report up, but the hardware
12254                          * isn't really ready for about 20 msec.  Double it
12255                          * to be sure.
12256                          */
12257                         mdelay(40);
12258
12259                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12260                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
12261                         if (tg3_flag(tp, TSO_CAPABLE) &&
12262                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12263                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12264                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12265                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12266                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12267                 }
12268
12269                 /* Re-enable gphy autopowerdown. */
12270                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12271                         tg3_phy_toggle_apd(tp, true);
12272         }
12273
12274         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12275
12276 done:
12277         tp->phy_flags |= eee_cap;
12278
12279         return err;
12280 }
12281
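/* ethtool ->self_test() entry point ("ethtool -t").  Offline testing
 * halts the chip, runs the register, memory, loopback and interrupt
 * tests, then restarts the hardware; each data[] slot holds one
 * sub-test result.
 */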
12282 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12283                           u64 *data)
12284 {
12285         struct tg3 *tp = netdev_priv(dev);
12286         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12287
12288         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12289             tg3_power_up(tp)) {
12290                 etest->flags |= ETH_TEST_FL_FAILED;
12291                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12292                 return;
12293         }
12294
12295         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12296
12297         if (tg3_test_nvram(tp) != 0) {
12298                 etest->flags |= ETH_TEST_FL_FAILED;
12299                 data[0] = 1;
12300         }
12301         if (!doextlpbk && tg3_test_link(tp)) {
12302                 etest->flags |= ETH_TEST_FL_FAILED;
12303                 data[1] = 1;
12304         }
12305         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12306                 int err, err2 = 0, irq_sync = 0;
12307
12308                 if (netif_running(dev)) {
12309                         tg3_phy_stop(tp);
12310                         tg3_netif_stop(tp);
12311                         irq_sync = 1;
12312                 }
12313
12314                 tg3_full_lock(tp, irq_sync);
12315
12316                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12317                 err = tg3_nvram_lock(tp);
12318                 tg3_halt_cpu(tp, RX_CPU_BASE);
12319                 if (!tg3_flag(tp, 5705_PLUS))
12320                         tg3_halt_cpu(tp, TX_CPU_BASE);
12321                 if (!err)
12322                         tg3_nvram_unlock(tp);
12323
12324                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12325                         tg3_phy_reset(tp);
12326
12327                 if (tg3_test_registers(tp) != 0) {
12328                         etest->flags |= ETH_TEST_FL_FAILED;
12329                         data[2] = 1;
12330                 }
12331
12332                 if (tg3_test_memory(tp) != 0) {
12333                         etest->flags |= ETH_TEST_FL_FAILED;
12334                         data[3] = 1;
12335                 }
12336
12337                 if (doextlpbk)
12338                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12339
12340                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12341                         etest->flags |= ETH_TEST_FL_FAILED;
12342
12343                 tg3_full_unlock(tp);
12344
12345                 if (tg3_test_interrupt(tp) != 0) {
12346                         etest->flags |= ETH_TEST_FL_FAILED;
12347                         data[7] = 1;
12348                 }
12349
12350                 tg3_full_lock(tp, 0);
12351
12352                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12353                 if (netif_running(dev)) {
12354                         tg3_flag_set(tp, INIT_COMPLETE);
12355                         err2 = tg3_restart_hw(tp, 1);
12356                         if (!err2)
12357                                 tg3_netif_start(tp);
12358                 }
12359
12360                 tg3_full_unlock(tp);
12361
12362                 if (irq_sync && !err2)
12363                         tg3_phy_start(tp);
12364         }
12365         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12366                 tg3_power_down(tp);
12367
12368 }
12369
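/* ->ndo_do_ioctl: MII register access.  With phylib attached, requests
 * are forwarded to the PHY layer; serdes devices have no MII-accessible
 * PHY and fail with -EOPNOTSUPP.
 */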
12370 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12371 {
12372         struct mii_ioctl_data *data = if_mii(ifr);
12373         struct tg3 *tp = netdev_priv(dev);
12374         int err;
12375
12376         if (tg3_flag(tp, USE_PHYLIB)) {
12377                 struct phy_device *phydev;
12378                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12379                         return -EAGAIN;
12380                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12381                 return phy_mii_ioctl(phydev, ifr, cmd);
12382         }
12383
12384         switch (cmd) {
12385         case SIOCGMIIPHY:
12386                 data->phy_id = tp->phy_addr;
12387
12388                 /* fall through */
12389         case SIOCGMIIREG: {
12390                 u32 mii_regval;
12391
12392                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12393                         break;                  /* We have no PHY */
12394
12395                 if (!netif_running(dev))
12396                         return -EAGAIN;
12397
12398                 spin_lock_bh(&tp->lock);
12399                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12400                 spin_unlock_bh(&tp->lock);
12401
12402                 data->val_out = mii_regval;
12403
12404                 return err;
12405         }
12406
12407         case SIOCSMIIREG:
12408                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12409                         break;                  /* We have no PHY */
12410
12411                 if (!netif_running(dev))
12412                         return -EAGAIN;
12413
12414                 spin_lock_bh(&tp->lock);
12415                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12416                 spin_unlock_bh(&tp->lock);
12417
12418                 return err;
12419
12420         default:
12421                 /* do nothing */
12422                 break;
12423         }
12424         return -EOPNOTSUPP;
12425 }
12426
12427 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12428 {
12429         struct tg3 *tp = netdev_priv(dev);
12430
12431         memcpy(ec, &tp->coal, sizeof(*ec));
12432         return 0;
12433 }
12434
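/* ethtool ->set_coalesce(): validate the requested coalescing values
 * against the chip's register limits (the irq-latency and statistics
 * timers only exist on pre-5705 parts), then cache them and, if the
 * device is running, program them into the hardware.
 */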
12435 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12436 {
12437         struct tg3 *tp = netdev_priv(dev);
12438         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12439         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12440
12441         if (!tg3_flag(tp, 5705_PLUS)) {
12442                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12443                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12444                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12445                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12446         }
12447
12448         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12449             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12450             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12451             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12452             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12453             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12454             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12455             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12456             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12457             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12458                 return -EINVAL;
12459
12460         /* No rx interrupts will be generated if both are zero */
12461         if ((ec->rx_coalesce_usecs == 0) &&
12462             (ec->rx_max_coalesced_frames == 0))
12463                 return -EINVAL;
12464
12465         /* No tx interrupts will be generated if both are zero */
12466         if ((ec->tx_coalesce_usecs == 0) &&
12467             (ec->tx_max_coalesced_frames == 0))
12468                 return -EINVAL;
12469
12470         /* Only copy relevant parameters, ignore all others. */
12471         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12472         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12473         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12474         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12475         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12476         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12477         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12478         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12479         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12480
12481         if (netif_running(dev)) {
12482                 tg3_full_lock(tp, 0);
12483                 __tg3_set_coalesce(tp, &tp->coal);
12484                 tg3_full_unlock(tp);
12485         }
12486         return 0;
12487 }
12488
12489 static const struct ethtool_ops tg3_ethtool_ops = {
12490         .get_settings           = tg3_get_settings,
12491         .set_settings           = tg3_set_settings,
12492         .get_drvinfo            = tg3_get_drvinfo,
12493         .get_regs_len           = tg3_get_regs_len,
12494         .get_regs               = tg3_get_regs,
12495         .get_wol                = tg3_get_wol,
12496         .set_wol                = tg3_set_wol,
12497         .get_msglevel           = tg3_get_msglevel,
12498         .set_msglevel           = tg3_set_msglevel,
12499         .nway_reset             = tg3_nway_reset,
12500         .get_link               = ethtool_op_get_link,
12501         .get_eeprom_len         = tg3_get_eeprom_len,
12502         .get_eeprom             = tg3_get_eeprom,
12503         .set_eeprom             = tg3_set_eeprom,
12504         .get_ringparam          = tg3_get_ringparam,
12505         .set_ringparam          = tg3_set_ringparam,
12506         .get_pauseparam         = tg3_get_pauseparam,
12507         .set_pauseparam         = tg3_set_pauseparam,
12508         .self_test              = tg3_self_test,
12509         .get_strings            = tg3_get_strings,
12510         .set_phys_id            = tg3_set_phys_id,
12511         .get_ethtool_stats      = tg3_get_ethtool_stats,
12512         .get_coalesce           = tg3_get_coalesce,
12513         .set_coalesce           = tg3_set_coalesce,
12514         .get_sset_count         = tg3_get_sset_count,
12515         .get_rxnfc              = tg3_get_rxnfc,
12516         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12517         .get_rxfh_indir         = tg3_get_rxfh_indir,
12518         .set_rxfh_indir         = tg3_set_rxfh_indir,
12519         .get_ts_info            = ethtool_op_get_ts_info,
12520 };
12521
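/* ->ndo_get_stats64: snapshot the hardware counters under tp->lock.
 * Once the chip is down and hw_stats is gone, the last saved counters
 * in net_stats_prev are returned instead.
 */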
12522 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12523                                                 struct rtnl_link_stats64 *stats)
12524 {
12525         struct tg3 *tp = netdev_priv(dev);
12526
12527         spin_lock_bh(&tp->lock);
12528         if (!tp->hw_stats) {
12529                 spin_unlock_bh(&tp->lock);
12530                 return &tp->net_stats_prev;
12531         }
12532
12533         tg3_get_nstats(tp, stats);
12534         spin_unlock_bh(&tp->lock);
12535
12536         return stats;
12537 }
12538
12539 static void tg3_set_rx_mode(struct net_device *dev)
12540 {
12541         struct tg3 *tp = netdev_priv(dev);
12542
12543         if (!netif_running(dev))
12544                 return;
12545
12546         tg3_full_lock(tp, 0);
12547         __tg3_set_rx_mode(dev);
12548         tg3_full_unlock(tp);
12549 }
12550
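/* Apply a new MTU: jumbo frames require the jumbo rx ring, and on
 * 5780-class chips TSO must be turned off while jumbo frames are in
 * use, since that hardware cannot do both at once.
 */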
12551 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12552                                int new_mtu)
12553 {
12554         dev->mtu = new_mtu;
12555
12556         if (new_mtu > ETH_DATA_LEN) {
12557                 if (tg3_flag(tp, 5780_CLASS)) {
12558                         netdev_update_features(dev);
12559                         tg3_flag_clear(tp, TSO_CAPABLE);
12560                 } else {
12561                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12562                 }
12563         } else {
12564                 if (tg3_flag(tp, 5780_CLASS)) {
12565                         tg3_flag_set(tp, TSO_CAPABLE);
12566                         netdev_update_features(dev);
12567                 }
12568                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12569         }
12570 }
12571
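/* ->ndo_change_mtu: on a running interface the chip must be halted and
 * restarted for the new rx buffer sizing to take effect.
 */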
12572 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12573 {
12574         struct tg3 *tp = netdev_priv(dev);
12575         int err, reset_phy = 0;
12576
12577         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12578                 return -EINVAL;
12579
12580         if (!netif_running(dev)) {
		/* The MTU change will take effect when the
		 * device is next brought up.
		 */
12584                 tg3_set_mtu(dev, tp, new_mtu);
12585                 return 0;
12586         }
12587
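	/* Changing the MTU on a running device means a full quiesce:
	 * stop the PHY and the data path, halt the chip, apply the new
	 * MTU, then restart the hardware and the PHY.
	 */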
12588         tg3_phy_stop(tp);
12589
12590         tg3_netif_stop(tp);
12591
12592         tg3_full_lock(tp, 1);
12593
12594         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12595
12596         tg3_set_mtu(dev, tp, new_mtu);
12597
	/* Reset the PHY; otherwise the read DMA engine will be left in a
	 * mode that breaks all DMA requests up into 256-byte transfers.
	 */
12601         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12602                 reset_phy = 1;
12603
12604         err = tg3_restart_hw(tp, reset_phy);
12605
12606         if (!err)
12607                 tg3_netif_start(tp);
12608
12609         tg3_full_unlock(tp);
12610
12611         if (!err)
12612                 tg3_phy_start(tp);
12613
12614         return err;
12615 }
12616
12617 static const struct net_device_ops tg3_netdev_ops = {
12618         .ndo_open               = tg3_open,
12619         .ndo_stop               = tg3_close,
12620         .ndo_start_xmit         = tg3_start_xmit,
12621         .ndo_get_stats64        = tg3_get_stats64,
12622         .ndo_validate_addr      = eth_validate_addr,
12623         .ndo_set_rx_mode        = tg3_set_rx_mode,
12624         .ndo_set_mac_address    = tg3_set_mac_addr,
12625         .ndo_do_ioctl           = tg3_ioctl,
12626         .ndo_tx_timeout         = tg3_tx_timeout,
12627         .ndo_change_mtu         = tg3_change_mtu,
12628         .ndo_fix_features       = tg3_fix_features,
12629         .ndo_set_features       = tg3_set_features,
12630 #ifdef CONFIG_NET_POLL_CONTROLLER
12631         .ndo_poll_controller    = tg3_poll_controller,
12632 #endif
12633 };
12634
12635 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12636 {
12637         u32 cursize, val, magic;
12638
12639         tp->nvram_size = EEPROM_CHIP_SIZE;
12640
12641         if (tg3_nvram_read(tp, 0, &magic) != 0)
12642                 return;
12643
12644         if ((magic != TG3_EEPROM_MAGIC) &&
12645             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12646             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12647                 return;
12648
12649         /*
12650          * Size the chip by reading offsets at increasing powers of two.
12651          * When we encounter our validation signature, we know the addressing
12652          * has wrapped around, and thus have our chip size.
12653          */
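	/*
	 * Worked example (hypothetical 32KB part): reads at offsets 0x10,
	 * 0x20, ..., 0x4000 return ordinary data; the read at 0x8000
	 * wraps to offset 0 and returns the magic word, so nvram_size
	 * ends up as 0x8000 (32KB).
	 */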
12654         cursize = 0x10;
12655
12656         while (cursize < tp->nvram_size) {
12657                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12658                         return;
12659
12660                 if (val == magic)
12661                         break;
12662
12663                 cursize <<= 1;
12664         }
12665
12666         tp->nvram_size = cursize;
12667 }
12668
12669 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12670 {
12671         u32 val;
12672
12673         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12674                 return;
12675
12676         /* Selfboot format */
12677         if (val != TG3_EEPROM_MAGIC) {
12678                 tg3_get_eeprom_size(tp);
12679                 return;
12680         }
12681
12682         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12683                 if (val != 0) {
12684                         /* This is confusing.  We want to operate on the
12685                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12686                          * call will read from NVRAM and byteswap the data
12687                          * according to the byteswapping settings for all
12688                          * other register accesses.  This ensures the data we
12689                          * want will always reside in the lower 16-bits.
12690                          * However, the data in NVRAM is in LE format, which
12691                          * means the data from the NVRAM read will always be
12692                          * opposite the endianness of the CPU.  The 16-bit
12693                          * byteswap then brings the data to CPU endianness.
12694                          */
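			/* Worked example (assuming the 16-bit field holds
			 * the size in KB): a 512KB part stores 0x0200 at
			 * offset 0xf2; the byteswapped read leaves 0x0002
			 * in the low 16 bits, and swab16() restores 0x0200,
			 * so nvram_size = 0x0200 * 1024 bytes.
			 */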
12695                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12696                         return;
12697                 }
12698         }
12699         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12700 }
12701
12702 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12703 {
12704         u32 nvcfg1;
12705
12706         nvcfg1 = tr32(NVRAM_CFG1);
12707         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12708                 tg3_flag_set(tp, FLASH);
12709         } else {
12710                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12711                 tw32(NVRAM_CFG1, nvcfg1);
12712         }
12713
12714         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12715             tg3_flag(tp, 5780_CLASS)) {
12716                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12717                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12718                         tp->nvram_jedecnum = JEDEC_ATMEL;
12719                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12720                         tg3_flag_set(tp, NVRAM_BUFFERED);
12721                         break;
12722                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12723                         tp->nvram_jedecnum = JEDEC_ATMEL;
12724                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12725                         break;
12726                 case FLASH_VENDOR_ATMEL_EEPROM:
12727                         tp->nvram_jedecnum = JEDEC_ATMEL;
12728                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12729                         tg3_flag_set(tp, NVRAM_BUFFERED);
12730                         break;
12731                 case FLASH_VENDOR_ST:
12732                         tp->nvram_jedecnum = JEDEC_ST;
12733                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12734                         tg3_flag_set(tp, NVRAM_BUFFERED);
12735                         break;
12736                 case FLASH_VENDOR_SAIFUN:
12737                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12738                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12739                         break;
12740                 case FLASH_VENDOR_SST_SMALL:
12741                 case FLASH_VENDOR_SST_LARGE:
12742                         tp->nvram_jedecnum = JEDEC_SST;
12743                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12744                         break;
12745                 }
12746         } else {
12747                 tp->nvram_jedecnum = JEDEC_ATMEL;
12748                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12749                 tg3_flag_set(tp, NVRAM_BUFFERED);
12750         }
12751 }
12752
12753 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12754 {
12755         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12756         case FLASH_5752PAGE_SIZE_256:
12757                 tp->nvram_pagesize = 256;
12758                 break;
12759         case FLASH_5752PAGE_SIZE_512:
12760                 tp->nvram_pagesize = 512;
12761                 break;
12762         case FLASH_5752PAGE_SIZE_1K:
12763                 tp->nvram_pagesize = 1024;
12764                 break;
12765         case FLASH_5752PAGE_SIZE_2K:
12766                 tp->nvram_pagesize = 2048;
12767                 break;
12768         case FLASH_5752PAGE_SIZE_4K:
12769                 tp->nvram_pagesize = 4096;
12770                 break;
12771         case FLASH_5752PAGE_SIZE_264:
12772                 tp->nvram_pagesize = 264;
12773                 break;
12774         case FLASH_5752PAGE_SIZE_528:
12775                 tp->nvram_pagesize = 528;
12776                 break;
12777         }
12778 }
12779
12780 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12781 {
12782         u32 nvcfg1;
12783
12784         nvcfg1 = tr32(NVRAM_CFG1);
12785
12786         /* NVRAM protection for TPM */
12787         if (nvcfg1 & (1 << 27))
12788                 tg3_flag_set(tp, PROTECTED_NVRAM);
12789
12790         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12791         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12792         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12793                 tp->nvram_jedecnum = JEDEC_ATMEL;
12794                 tg3_flag_set(tp, NVRAM_BUFFERED);
12795                 break;
12796         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12797                 tp->nvram_jedecnum = JEDEC_ATMEL;
12798                 tg3_flag_set(tp, NVRAM_BUFFERED);
12799                 tg3_flag_set(tp, FLASH);
12800                 break;
12801         case FLASH_5752VENDOR_ST_M45PE10:
12802         case FLASH_5752VENDOR_ST_M45PE20:
12803         case FLASH_5752VENDOR_ST_M45PE40:
12804                 tp->nvram_jedecnum = JEDEC_ST;
12805                 tg3_flag_set(tp, NVRAM_BUFFERED);
12806                 tg3_flag_set(tp, FLASH);
12807                 break;
12808         }
12809
12810         if (tg3_flag(tp, FLASH)) {
12811                 tg3_nvram_get_pagesize(tp, nvcfg1);
12812         } else {
12813                 /* For eeprom, set pagesize to maximum eeprom size */
12814                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12815
12816                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12817                 tw32(NVRAM_CFG1, nvcfg1);
12818         }
12819 }
12820
12821 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12822 {
12823         u32 nvcfg1, protect = 0;
12824
12825         nvcfg1 = tr32(NVRAM_CFG1);
12826
12827         /* NVRAM protection for TPM */
12828         if (nvcfg1 & (1 << 27)) {
12829                 tg3_flag_set(tp, PROTECTED_NVRAM);
12830                 protect = 1;
12831         }
12832
12833         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12834         switch (nvcfg1) {
12835         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12836         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12837         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12838         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12839                 tp->nvram_jedecnum = JEDEC_ATMEL;
12840                 tg3_flag_set(tp, NVRAM_BUFFERED);
12841                 tg3_flag_set(tp, FLASH);
12842                 tp->nvram_pagesize = 264;
12843                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12844                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12845                         tp->nvram_size = (protect ? 0x3e200 :
12846                                           TG3_NVRAM_SIZE_512KB);
12847                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12848                         tp->nvram_size = (protect ? 0x1f200 :
12849                                           TG3_NVRAM_SIZE_256KB);
12850                 else
12851                         tp->nvram_size = (protect ? 0x1f200 :
12852                                           TG3_NVRAM_SIZE_128KB);
12853                 break;
12854         case FLASH_5752VENDOR_ST_M45PE10:
12855         case FLASH_5752VENDOR_ST_M45PE20:
12856         case FLASH_5752VENDOR_ST_M45PE40:
12857                 tp->nvram_jedecnum = JEDEC_ST;
12858                 tg3_flag_set(tp, NVRAM_BUFFERED);
12859                 tg3_flag_set(tp, FLASH);
12860                 tp->nvram_pagesize = 256;
12861                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12862                         tp->nvram_size = (protect ?
12863                                           TG3_NVRAM_SIZE_64KB :
12864                                           TG3_NVRAM_SIZE_128KB);
12865                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12866                         tp->nvram_size = (protect ?
12867                                           TG3_NVRAM_SIZE_64KB :
12868                                           TG3_NVRAM_SIZE_256KB);
12869                 else
12870                         tp->nvram_size = (protect ?
12871                                           TG3_NVRAM_SIZE_128KB :
12872                                           TG3_NVRAM_SIZE_512KB);
12873                 break;
12874         }
12875 }
12876
12877 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12878 {
12879         u32 nvcfg1;
12880
12881         nvcfg1 = tr32(NVRAM_CFG1);
12882
12883         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12884         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12885         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12886         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12887         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12888                 tp->nvram_jedecnum = JEDEC_ATMEL;
12889                 tg3_flag_set(tp, NVRAM_BUFFERED);
12890                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12891
12892                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12893                 tw32(NVRAM_CFG1, nvcfg1);
12894                 break;
12895         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12896         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12897         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12898         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12899                 tp->nvram_jedecnum = JEDEC_ATMEL;
12900                 tg3_flag_set(tp, NVRAM_BUFFERED);
12901                 tg3_flag_set(tp, FLASH);
12902                 tp->nvram_pagesize = 264;
12903                 break;
12904         case FLASH_5752VENDOR_ST_M45PE10:
12905         case FLASH_5752VENDOR_ST_M45PE20:
12906         case FLASH_5752VENDOR_ST_M45PE40:
12907                 tp->nvram_jedecnum = JEDEC_ST;
12908                 tg3_flag_set(tp, NVRAM_BUFFERED);
12909                 tg3_flag_set(tp, FLASH);
12910                 tp->nvram_pagesize = 256;
12911                 break;
12912         }
12913 }
12914
12915 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12916 {
12917         u32 nvcfg1, protect = 0;
12918
12919         nvcfg1 = tr32(NVRAM_CFG1);
12920
12921         /* NVRAM protection for TPM */
12922         if (nvcfg1 & (1 << 27)) {
12923                 tg3_flag_set(tp, PROTECTED_NVRAM);
12924                 protect = 1;
12925         }
12926
12927         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12928         switch (nvcfg1) {
12929         case FLASH_5761VENDOR_ATMEL_ADB021D:
12930         case FLASH_5761VENDOR_ATMEL_ADB041D:
12931         case FLASH_5761VENDOR_ATMEL_ADB081D:
12932         case FLASH_5761VENDOR_ATMEL_ADB161D:
12933         case FLASH_5761VENDOR_ATMEL_MDB021D:
12934         case FLASH_5761VENDOR_ATMEL_MDB041D:
12935         case FLASH_5761VENDOR_ATMEL_MDB081D:
12936         case FLASH_5761VENDOR_ATMEL_MDB161D:
12937                 tp->nvram_jedecnum = JEDEC_ATMEL;
12938                 tg3_flag_set(tp, NVRAM_BUFFERED);
12939                 tg3_flag_set(tp, FLASH);
12940                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12941                 tp->nvram_pagesize = 256;
12942                 break;
12943         case FLASH_5761VENDOR_ST_A_M45PE20:
12944         case FLASH_5761VENDOR_ST_A_M45PE40:
12945         case FLASH_5761VENDOR_ST_A_M45PE80:
12946         case FLASH_5761VENDOR_ST_A_M45PE16:
12947         case FLASH_5761VENDOR_ST_M_M45PE20:
12948         case FLASH_5761VENDOR_ST_M_M45PE40:
12949         case FLASH_5761VENDOR_ST_M_M45PE80:
12950         case FLASH_5761VENDOR_ST_M_M45PE16:
12951                 tp->nvram_jedecnum = JEDEC_ST;
12952                 tg3_flag_set(tp, NVRAM_BUFFERED);
12953                 tg3_flag_set(tp, FLASH);
12954                 tp->nvram_pagesize = 256;
12955                 break;
12956         }
12957
12958         if (protect) {
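		/* With TPM protection active, software access stops at the
		 * lockout address, so report that as the usable size.
		 */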
12959                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12960         } else {
12961                 switch (nvcfg1) {
12962                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12963                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12964                 case FLASH_5761VENDOR_ST_A_M45PE16:
12965                 case FLASH_5761VENDOR_ST_M_M45PE16:
12966                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12967                         break;
12968                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12969                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12970                 case FLASH_5761VENDOR_ST_A_M45PE80:
12971                 case FLASH_5761VENDOR_ST_M_M45PE80:
12972                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12973                         break;
12974                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12975                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12976                 case FLASH_5761VENDOR_ST_A_M45PE40:
12977                 case FLASH_5761VENDOR_ST_M_M45PE40:
12978                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12979                         break;
12980                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12981                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12982                 case FLASH_5761VENDOR_ST_A_M45PE20:
12983                 case FLASH_5761VENDOR_ST_M_M45PE20:
12984                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12985                         break;
12986                 }
12987         }
12988 }
12989
12990 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12991 {
12992         tp->nvram_jedecnum = JEDEC_ATMEL;
12993         tg3_flag_set(tp, NVRAM_BUFFERED);
12994         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12995 }
12996
12997 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12998 {
12999         u32 nvcfg1;
13000
13001         nvcfg1 = tr32(NVRAM_CFG1);
13002
13003         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13004         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13005         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13006                 tp->nvram_jedecnum = JEDEC_ATMEL;
13007                 tg3_flag_set(tp, NVRAM_BUFFERED);
13008                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13009
13010                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13011                 tw32(NVRAM_CFG1, nvcfg1);
13012                 return;
13013         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13014         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13015         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13016         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13017         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13018         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13019         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13020                 tp->nvram_jedecnum = JEDEC_ATMEL;
13021                 tg3_flag_set(tp, NVRAM_BUFFERED);
13022                 tg3_flag_set(tp, FLASH);
13023
13024                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13025                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13026                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13027                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13028                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13029                         break;
13030                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13031                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13032                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13033                         break;
13034                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13035                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13036                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13037                         break;
13038                 }
13039                 break;
13040         case FLASH_5752VENDOR_ST_M45PE10:
13041         case FLASH_5752VENDOR_ST_M45PE20:
13042         case FLASH_5752VENDOR_ST_M45PE40:
13043                 tp->nvram_jedecnum = JEDEC_ST;
13044                 tg3_flag_set(tp, NVRAM_BUFFERED);
13045                 tg3_flag_set(tp, FLASH);
13046
13047                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13048                 case FLASH_5752VENDOR_ST_M45PE10:
13049                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13050                         break;
13051                 case FLASH_5752VENDOR_ST_M45PE20:
13052                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13053                         break;
13054                 case FLASH_5752VENDOR_ST_M45PE40:
13055                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13056                         break;
13057                 }
13058                 break;
13059         default:
13060                 tg3_flag_set(tp, NO_NVRAM);
13061                 return;
13062         }
13063
13064         tg3_nvram_get_pagesize(tp, nvcfg1);
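	/* Only Atmel DataFlash parts with 264- or 528-byte pages need
	 * the flash address translation; everything else is addressed
	 * linearly.
	 */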
13065         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13066                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13067 }
13068
13070 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
13071 {
13072         u32 nvcfg1;
13073
13074         nvcfg1 = tr32(NVRAM_CFG1);
13075
13076         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13077         case FLASH_5717VENDOR_ATMEL_EEPROM:
13078         case FLASH_5717VENDOR_MICRO_EEPROM:
13079                 tp->nvram_jedecnum = JEDEC_ATMEL;
13080                 tg3_flag_set(tp, NVRAM_BUFFERED);
13081                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13082
13083                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13084                 tw32(NVRAM_CFG1, nvcfg1);
13085                 return;
13086         case FLASH_5717VENDOR_ATMEL_MDB011D:
13087         case FLASH_5717VENDOR_ATMEL_ADB011B:
13088         case FLASH_5717VENDOR_ATMEL_ADB011D:
13089         case FLASH_5717VENDOR_ATMEL_MDB021D:
13090         case FLASH_5717VENDOR_ATMEL_ADB021B:
13091         case FLASH_5717VENDOR_ATMEL_ADB021D:
13092         case FLASH_5717VENDOR_ATMEL_45USPT:
13093                 tp->nvram_jedecnum = JEDEC_ATMEL;
13094                 tg3_flag_set(tp, NVRAM_BUFFERED);
13095                 tg3_flag_set(tp, FLASH);
13096
13097                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13098                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13099                         /* Detect size with tg3_nvram_get_size() */
13100                         break;
13101                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13102                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13103                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13104                         break;
13105                 default:
13106                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13107                         break;
13108                 }
13109                 break;
13110         case FLASH_5717VENDOR_ST_M_M25PE10:
13111         case FLASH_5717VENDOR_ST_A_M25PE10:
13112         case FLASH_5717VENDOR_ST_M_M45PE10:
13113         case FLASH_5717VENDOR_ST_A_M45PE10:
13114         case FLASH_5717VENDOR_ST_M_M25PE20:
13115         case FLASH_5717VENDOR_ST_A_M25PE20:
13116         case FLASH_5717VENDOR_ST_M_M45PE20:
13117         case FLASH_5717VENDOR_ST_A_M45PE20:
13118         case FLASH_5717VENDOR_ST_25USPT:
13119         case FLASH_5717VENDOR_ST_45USPT:
13120                 tp->nvram_jedecnum = JEDEC_ST;
13121                 tg3_flag_set(tp, NVRAM_BUFFERED);
13122                 tg3_flag_set(tp, FLASH);
13123
13124                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13125                 case FLASH_5717VENDOR_ST_M_M25PE20:
13126                 case FLASH_5717VENDOR_ST_M_M45PE20:
13127                         /* Detect size with tg3_nvram_get_size() */
13128                         break;
13129                 case FLASH_5717VENDOR_ST_A_M25PE20:
13130                 case FLASH_5717VENDOR_ST_A_M45PE20:
13131                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13132                         break;
13133                 default:
13134                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13135                         break;
13136                 }
13137                 break;
13138         default:
13139                 tg3_flag_set(tp, NO_NVRAM);
13140                 return;
13141         }
13142
13143         tg3_nvram_get_pagesize(tp, nvcfg1);
13144         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13145                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13146 }
13147
13148 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
13149 {
13150         u32 nvcfg1, nvmpinstrp;
13151
13152         nvcfg1 = tr32(NVRAM_CFG1);
13153         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13154
13155         switch (nvmpinstrp) {
13156         case FLASH_5720_EEPROM_HD:
13157         case FLASH_5720_EEPROM_LD:
13158                 tp->nvram_jedecnum = JEDEC_ATMEL;
13159                 tg3_flag_set(tp, NVRAM_BUFFERED);
13160
13161                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13162                 tw32(NVRAM_CFG1, nvcfg1);
13163                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13164                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13165                 else
13166                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13167                 return;
13168         case FLASH_5720VENDOR_M_ATMEL_DB011D:
13169         case FLASH_5720VENDOR_A_ATMEL_DB011B:
13170         case FLASH_5720VENDOR_A_ATMEL_DB011D:
13171         case FLASH_5720VENDOR_M_ATMEL_DB021D:
13172         case FLASH_5720VENDOR_A_ATMEL_DB021B:
13173         case FLASH_5720VENDOR_A_ATMEL_DB021D:
13174         case FLASH_5720VENDOR_M_ATMEL_DB041D:
13175         case FLASH_5720VENDOR_A_ATMEL_DB041B:
13176         case FLASH_5720VENDOR_A_ATMEL_DB041D:
13177         case FLASH_5720VENDOR_M_ATMEL_DB081D:
13178         case FLASH_5720VENDOR_A_ATMEL_DB081D:
13179         case FLASH_5720VENDOR_ATMEL_45USPT:
13180                 tp->nvram_jedecnum = JEDEC_ATMEL;
13181                 tg3_flag_set(tp, NVRAM_BUFFERED);
13182                 tg3_flag_set(tp, FLASH);
13183
13184                 switch (nvmpinstrp) {
13185                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13186                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13187                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13188                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13189                         break;
13190                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13191                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13192                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13193                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13194                         break;
13195                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13196                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13197                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13198                         break;
13199                 default:
13200                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13201                         break;
13202                 }
13203                 break;
13204         case FLASH_5720VENDOR_M_ST_M25PE10:
13205         case FLASH_5720VENDOR_M_ST_M45PE10:
13206         case FLASH_5720VENDOR_A_ST_M25PE10:
13207         case FLASH_5720VENDOR_A_ST_M45PE10:
13208         case FLASH_5720VENDOR_M_ST_M25PE20:
13209         case FLASH_5720VENDOR_M_ST_M45PE20:
13210         case FLASH_5720VENDOR_A_ST_M25PE20:
13211         case FLASH_5720VENDOR_A_ST_M45PE20:
13212         case FLASH_5720VENDOR_M_ST_M25PE40:
13213         case FLASH_5720VENDOR_M_ST_M45PE40:
13214         case FLASH_5720VENDOR_A_ST_M25PE40:
13215         case FLASH_5720VENDOR_A_ST_M45PE40:
13216         case FLASH_5720VENDOR_M_ST_M25PE80:
13217         case FLASH_5720VENDOR_M_ST_M45PE80:
13218         case FLASH_5720VENDOR_A_ST_M25PE80:
13219         case FLASH_5720VENDOR_A_ST_M45PE80:
13220         case FLASH_5720VENDOR_ST_25USPT:
13221         case FLASH_5720VENDOR_ST_45USPT:
13222                 tp->nvram_jedecnum = JEDEC_ST;
13223                 tg3_flag_set(tp, NVRAM_BUFFERED);
13224                 tg3_flag_set(tp, FLASH);
13225
13226                 switch (nvmpinstrp) {
13227                 case FLASH_5720VENDOR_M_ST_M25PE20:
13228                 case FLASH_5720VENDOR_M_ST_M45PE20:
13229                 case FLASH_5720VENDOR_A_ST_M25PE20:
13230                 case FLASH_5720VENDOR_A_ST_M45PE20:
13231                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13232                         break;
13233                 case FLASH_5720VENDOR_M_ST_M25PE40:
13234                 case FLASH_5720VENDOR_M_ST_M45PE40:
13235                 case FLASH_5720VENDOR_A_ST_M25PE40:
13236                 case FLASH_5720VENDOR_A_ST_M45PE40:
13237                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13238                         break;
13239                 case FLASH_5720VENDOR_M_ST_M25PE80:
13240                 case FLASH_5720VENDOR_M_ST_M45PE80:
13241                 case FLASH_5720VENDOR_A_ST_M25PE80:
13242                 case FLASH_5720VENDOR_A_ST_M45PE80:
13243                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13244                         break;
13245                 default:
13246                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13247                         break;
13248                 }
13249                 break;
13250         default:
13251                 tg3_flag_set(tp, NO_NVRAM);
13252                 return;
13253         }
13254
13255         tg3_nvram_get_pagesize(tp, nvcfg1);
13256         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13257                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13258 }
13259
13260 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13261 static void __devinit tg3_nvram_init(struct tg3 *tp)
13262 {
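	/* Reset the EEPROM state machine and program the default clock
	 * period before touching the NVRAM interface.
	 */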
13263         tw32_f(GRC_EEPROM_ADDR,
13264              (EEPROM_ADDR_FSM_RESET |
13265               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13266                EEPROM_ADDR_CLKPERD_SHIFT)));
13267
13268         msleep(1);
13269
	/* Enable serial EEPROM (SEEPROM) accesses. */
13271         tw32_f(GRC_LOCAL_CTRL,
13272              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13273         udelay(100);
13274
13275         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13276             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13277                 tg3_flag_set(tp, NVRAM);
13278
13279                 if (tg3_nvram_lock(tp)) {
13280                         netdev_warn(tp->dev,
13281                                     "Cannot get nvram lock, %s failed\n",
13282                                     __func__);
13283                         return;
13284                 }
13285                 tg3_enable_nvram_access(tp);
13286
13287                 tp->nvram_size = 0;
13288
13289                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13290                         tg3_get_5752_nvram_info(tp);
13291                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13292                         tg3_get_5755_nvram_info(tp);
13293                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13294                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13295                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13296                         tg3_get_5787_nvram_info(tp);
13297                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13298                         tg3_get_5761_nvram_info(tp);
13299                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13300                         tg3_get_5906_nvram_info(tp);
13301                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13302                          tg3_flag(tp, 57765_CLASS))
13303                         tg3_get_57780_nvram_info(tp);
13304                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13305                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13306                         tg3_get_5717_nvram_info(tp);
13307                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13308                         tg3_get_5720_nvram_info(tp);
13309                 else
13310                         tg3_get_nvram_info(tp);
13311
13312                 if (tp->nvram_size == 0)
13313                         tg3_get_nvram_size(tp);
13314
13315                 tg3_disable_nvram_access(tp);
13316                 tg3_nvram_unlock(tp);
13317
13318         } else {
13319                 tg3_flag_clear(tp, NVRAM);
13320                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13321
13322                 tg3_get_eeprom_size(tp);
13323         }
13324 }
13325
13326 struct subsys_tbl_ent {
13327         u16 subsys_vendor, subsys_devid;
13328         u32 phy_id;
13329 };
13330
13331 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13332         /* Broadcom boards. */
13333         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13334           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13335         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13336           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13337         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13338           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13339         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13340           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13341         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13342           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13343         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13344           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13345         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13346           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13347         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13348           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13349         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13350           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13351         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13352           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13353         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13354           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13355
13356         /* 3com boards. */
13357         { TG3PCI_SUBVENDOR_ID_3COM,
13358           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13359         { TG3PCI_SUBVENDOR_ID_3COM,
13360           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13361         { TG3PCI_SUBVENDOR_ID_3COM,
13362           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13363         { TG3PCI_SUBVENDOR_ID_3COM,
13364           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13365         { TG3PCI_SUBVENDOR_ID_3COM,
13366           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13367
13368         /* DELL boards. */
13369         { TG3PCI_SUBVENDOR_ID_DELL,
13370           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13371         { TG3PCI_SUBVENDOR_ID_DELL,
13372           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13373         { TG3PCI_SUBVENDOR_ID_DELL,
13374           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13375         { TG3PCI_SUBVENDOR_ID_DELL,
13376           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13377
13378         /* Compaq boards. */
13379         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13380           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13381         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13382           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13383         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13384           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13385         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13386           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13387         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13388           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13389
13390         /* IBM boards. */
13391         { TG3PCI_SUBVENDOR_ID_IBM,
13392           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13393 };
13394
13395 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13396 {
13397         int i;
13398
13399         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13400                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13401                      tp->pdev->subsystem_vendor) &&
13402                     (subsys_id_to_phy_id[i].subsys_devid ==
13403                      tp->pdev->subsystem_device))
13404                         return &subsys_id_to_phy_id[i];
13405         }
13406         return NULL;
13407 }
13408
13409 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13410 {
13411         u32 val;
13412
13413         tp->phy_id = TG3_PHY_ID_INVALID;
13414         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13415
	/* Assume an onboard, WOL-capable device by default. */
13417         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13418         tg3_flag_set(tp, WOL_CAP);
13419
13420         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13421                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13422                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13423                         tg3_flag_set(tp, IS_NIC);
13424                 }
13425                 val = tr32(VCPU_CFGSHDW);
13426                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13427                         tg3_flag_set(tp, ASPM_WORKAROUND);
13428                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13429                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13430                         tg3_flag_set(tp, WOL_ENABLE);
13431                         device_set_wakeup_enable(&tp->pdev->dev, true);
13432                 }
13433                 goto done;
13434         }
13435
13436         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13437         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13438                 u32 nic_cfg, led_cfg;
13439                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13440                 int eeprom_phy_serdes = 0;
13441
13442                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13443                 tp->nic_sram_data_cfg = nic_cfg;
13444
13445                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13446                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13447                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13448                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13449                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13450                     (ver > 0) && (ver < 0x100))
13451                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13452
13453                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13454                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13455
13456                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13457                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13458                         eeprom_phy_serdes = 1;
13459
13460                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13461                 if (nic_phy_id != 0) {
13462                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13463                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13464
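			/* Pack the SRAM PHY ID into the driver's canonical
			 * layout: the high half of the word supplies bits
			 * 25:10, the 0xfc00 bits of the low half land in
			 * bits 31:26, and the 0x03ff bits in 9:0, matching
			 * what tg3_phy_probe() builds from MII_PHYSID1/2.
			 */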
13465                         eeprom_phy_id  = (id1 >> 16) << 10;
13466                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13467                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13468                 } else
13469                         eeprom_phy_id = 0;
13470
13471                 tp->phy_id = eeprom_phy_id;
13472                 if (eeprom_phy_serdes) {
13473                         if (!tg3_flag(tp, 5705_PLUS))
13474                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13475                         else
13476                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13477                 }
13478
13479                 if (tg3_flag(tp, 5750_PLUS))
13480                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13481                                     SHASTA_EXT_LED_MODE_MASK);
13482                 else
13483                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13484
13485                 switch (led_cfg) {
13486                 default:
13487                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13488                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13489                         break;
13490
13491                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13492                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13493                         break;
13494
13495                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13496                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13497
			/* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
			 * as happens with some older 5700/5701 bootcode.
			 */
13501                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13502                             ASIC_REV_5700 ||
13503                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13504                             ASIC_REV_5701)
13505                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13506
13507                         break;
13508
13509                 case SHASTA_EXT_LED_SHARED:
13510                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13511                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13512                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13513                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13514                                                  LED_CTRL_MODE_PHY_2);
13515                         break;
13516
13517                 case SHASTA_EXT_LED_MAC:
13518                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13519                         break;
13520
13521                 case SHASTA_EXT_LED_COMBO:
13522                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13523                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13524                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13525                                                  LED_CTRL_MODE_PHY_2);
13526                         break;
13527
13528                 }
13529
13530                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13531                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13532                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13533                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13534
13535                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13536                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13537
13538                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13539                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13540                         if ((tp->pdev->subsystem_vendor ==
13541                              PCI_VENDOR_ID_ARIMA) &&
13542                             (tp->pdev->subsystem_device == 0x205a ||
13543                              tp->pdev->subsystem_device == 0x2063))
13544                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13545                 } else {
13546                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13547                         tg3_flag_set(tp, IS_NIC);
13548                 }
13549
13550                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13551                         tg3_flag_set(tp, ENABLE_ASF);
13552                         if (tg3_flag(tp, 5750_PLUS))
13553                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13554                 }
13555
13556                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13557                     tg3_flag(tp, 5750_PLUS))
13558                         tg3_flag_set(tp, ENABLE_APE);
13559
13560                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13561                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13562                         tg3_flag_clear(tp, WOL_CAP);
13563
13564                 if (tg3_flag(tp, WOL_CAP) &&
13565                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13566                         tg3_flag_set(tp, WOL_ENABLE);
13567                         device_set_wakeup_enable(&tp->pdev->dev, true);
13568                 }
13569
13570                 if (cfg2 & (1 << 17))
13571                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13572
		/* SerDes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
13575                 if (cfg2 & (1 << 18))
13576                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13577
13578                 if ((tg3_flag(tp, 57765_PLUS) ||
13579                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13580                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13581                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13582                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13583
13584                 if (tg3_flag(tp, PCI_EXPRESS) &&
13585                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13586                     !tg3_flag(tp, 57765_PLUS)) {
13587                         u32 cfg3;
13588
13589                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13590                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13591                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13592                 }
13593
13594                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13595                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13596                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13597                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13598                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13599                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13600         }
13601 done:
13602         if (tg3_flag(tp, WOL_CAP))
13603                 device_set_wakeup_enable(&tp->pdev->dev,
13604                                          tg3_flag(tp, WOL_ENABLE));
13605         else
13606                 device_set_wakeup_capable(&tp->pdev->dev, false);
13607 }
13608
13609 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13610 {
13611         int i;
13612         u32 val;
13613
13614         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13615         tw32(OTP_CTRL, cmd);
13616
13617         /* Wait for up to 1 ms for command to execute. */
13618         for (i = 0; i < 100; i++) {
13619                 val = tr32(OTP_STATUS);
13620                 if (val & OTP_STATUS_CMD_DONE)
13621                         break;
13622                 udelay(10);
13623         }
13624
13625         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13626 }
13627
13628 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13629  * configuration is a 32-bit value that straddles the alignment boundary.
13630  * We do two 32-bit reads and then shift and merge the results.
13631  */
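/* For example, if the read at OTP_ADDRESS_MAGIC1 returns 0xAAAABBBB and
 * the read at OTP_ADDRESS_MAGIC2 returns 0xCCCCDDDD, the merged gphy
 * config below is 0xBBBBCCCC.
 */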
13632 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13633 {
13634         u32 bhalf_otp, thalf_otp;
13635
13636         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13637
13638         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13639                 return 0;
13640
13641         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13642
13643         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13644                 return 0;
13645
13646         thalf_otp = tr32(OTP_READ_DATA);
13647
13648         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13649
13650         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13651                 return 0;
13652
13653         bhalf_otp = tr32(OTP_READ_DATA);
13654
13655         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13656 }
13657
13658 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13659 {
13660         u32 adv = ADVERTISED_Autoneg;
13661
13662         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13663                 adv |= ADVERTISED_1000baseT_Half |
13664                        ADVERTISED_1000baseT_Full;
13665
13666         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13667                 adv |= ADVERTISED_100baseT_Half |
13668                        ADVERTISED_100baseT_Full |
13669                        ADVERTISED_10baseT_Half |
13670                        ADVERTISED_10baseT_Full |
13671                        ADVERTISED_TP;
13672         else
13673                 adv |= ADVERTISED_FIBRE;
13674
13675         tp->link_config.advertising = adv;
13676         tp->link_config.speed = SPEED_UNKNOWN;
13677         tp->link_config.duplex = DUPLEX_UNKNOWN;
13678         tp->link_config.autoneg = AUTONEG_ENABLE;
13679         tp->link_config.active_speed = SPEED_UNKNOWN;
13680         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13681
13682         tp->old_link = -1;
13683 }
13684
13685 static int __devinit tg3_phy_probe(struct tg3 *tp)
13686 {
13687         u32 hw_phy_id_1, hw_phy_id_2;
13688         u32 hw_phy_id, hw_phy_id_masked;
13689         int err;
13690
	/* Flow control autonegotiation is the default behavior. */
13692         tg3_flag_set(tp, PAUSE_AUTONEG);
13693         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13694
13695         if (tg3_flag(tp, ENABLE_APE)) {
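		/* Each PCI function arbitrates access to its PHY through
		 * its own APE hardware lock.
		 */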
13696                 switch (tp->pci_fn) {
13697                 case 0:
13698                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
13699                         break;
13700                 case 1:
13701                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
13702                         break;
13703                 case 2:
13704                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
13705                         break;
13706                 case 3:
13707                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
13708                         break;
13709                 }
13710         }
13711
13712         if (tg3_flag(tp, USE_PHYLIB))
13713                 return tg3_phy_init(tp);
13714
13715         /* Reading the PHY ID register can conflict with ASF
13716          * firmware access to the PHY hardware.
13717          */
13718         err = 0;
13719         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13720                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13721         } else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the PHY_ID found in the eeprom area and, failing
		 * that, to the hard-coded subsystem-ID table.
		 */
13727                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13728                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13729
13730                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13731                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13732                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13733
13734                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13735         }
13736
13737         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13738                 tp->phy_id = hw_phy_id;
13739                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13740                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13741                 else
13742                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13743         } else {
13744                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13745                         /* Do nothing, phy ID already set up in
13746                          * tg3_get_eeprom_hw_cfg().
13747                          */
13748                 } else {
13749                         struct subsys_tbl_ent *p;
13750
13751                         /* No eeprom signature?  Try the hardcoded
13752                          * subsys device table.
13753                          */
13754                         p = tg3_lookup_by_subsys(tp);
13755                         if (!p)
13756                                 return -ENODEV;
13757
13758                         tp->phy_id = p->phy_id;
13759                         if (!tp->phy_id ||
13760                             tp->phy_id == TG3_PHY_ID_BCM8002)
13761                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13762                 }
13763         }
13764
13765         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13766             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13767              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13768              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13769               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13770              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13771               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13772                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13773
13774         tg3_phy_init_link_config(tp);
13775
13776         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13777             !tg3_flag(tp, ENABLE_APE) &&
13778             !tg3_flag(tp, ENABLE_ASF)) {
13779                 u32 bmsr, dummy;
13780
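		/* BMSR latches link-down events, so read it twice; the
		 * second read reflects the current link state.
		 */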
13781                 tg3_readphy(tp, MII_BMSR, &bmsr);
13782                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13783                     (bmsr & BMSR_LSTATUS))
13784                         goto skip_phy_reset;
13785
13786                 err = tg3_phy_reset(tp);
13787                 if (err)
13788                         return err;
13789
13790                 tg3_phy_set_wirespeed(tp);
13791
13792                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13793                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13794                                             tp->link_config.flowctrl);
13795
13796                         tg3_writephy(tp, MII_BMCR,
13797                                      BMCR_ANENABLE | BMCR_ANRESTART);
13798                 }
13799         }
13800
13801 skip_phy_reset:
13802         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13803                 err = tg3_init_5401phy_dsp(tp);
13804                 if (err)
13805                         return err;
13806
13807                 err = tg3_init_5401phy_dsp(tp);
13808         }
13809
13810         return err;
13811 }
13812
13813 static void __devinit tg3_read_vpd(struct tg3 *tp)
13814 {
13815         u8 *vpd_data;
13816         unsigned int block_end, rosize, len;
13817         u32 vpdlen;
13818         int j, i = 0;
13819
13820         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13821         if (!vpd_data)
13822                 goto out_no_vpd;
13823
13824         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13825         if (i < 0)
13826                 goto out_not_found;
13827
13828         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13829         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13830         i += PCI_VPD_LRDT_TAG_SIZE;
13831
13832         if (block_end > vpdlen)
13833                 goto out_not_found;
13834
13835         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13836                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13837         if (j > 0) {
13838                 len = pci_vpd_info_field_size(&vpd_data[j]);
13839
13840                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
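		/* An MFR_ID of "1028" (Dell's PCI vendor ID in ASCII)
		 * means the VENDOR0 keyword carries a firmware version
		 * to append to fw_ver.
		 */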
13841                 if (j + len > block_end || len != 4 ||
13842                     memcmp(&vpd_data[j], "1028", 4))
13843                         goto partno;
13844
13845                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13846                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13847                 if (j < 0)
13848                         goto partno;
13849
13850                 len = pci_vpd_info_field_size(&vpd_data[j]);
13851
13852                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13853                 if (j + len > block_end || len >= TG3_VER_SIZE - 4)
13854                         goto partno;
13855
13856                 memcpy(tp->fw_ver, &vpd_data[j], len);
13857                 strncat(tp->fw_ver, " bc ", TG3_VER_SIZE - len - 1);
13858         }
13859
13860 partno:
13861         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13862                                       PCI_VPD_RO_KEYWORD_PARTNO);
13863         if (i < 0)
13864                 goto out_not_found;
13865
13866         len = pci_vpd_info_field_size(&vpd_data[i]);
13867
13868         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13869         if (len > TG3_BPN_SIZE ||
13870             (len + i) > vpdlen)
13871                 goto out_not_found;
13872
13873         memcpy(tp->board_part_number, &vpd_data[i], len);
13874
13875 out_not_found:
13876         kfree(vpd_data);
13877         if (tp->board_part_number[0])
13878                 return;
13879
13880 out_no_vpd:
13881         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13882                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13883                         strcpy(tp->board_part_number, "BCM5717");
13884                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13885                         strcpy(tp->board_part_number, "BCM5718");
13886                 else
13887                         goto nomatch;
13888         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13889                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13890                         strcpy(tp->board_part_number, "BCM57780");
13891                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13892                         strcpy(tp->board_part_number, "BCM57760");
13893                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13894                         strcpy(tp->board_part_number, "BCM57790");
13895                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13896                         strcpy(tp->board_part_number, "BCM57788");
13897                 else
13898                         goto nomatch;
13899         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13900                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13901                         strcpy(tp->board_part_number, "BCM57761");
13902                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13903                         strcpy(tp->board_part_number, "BCM57765");
13904                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13905                         strcpy(tp->board_part_number, "BCM57781");
13906                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13907                         strcpy(tp->board_part_number, "BCM57785");
13908                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13909                         strcpy(tp->board_part_number, "BCM57791");
13910                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13911                         strcpy(tp->board_part_number, "BCM57795");
13912                 else
13913                         goto nomatch;
13914         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13915                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13916                         strcpy(tp->board_part_number, "BCM57762");
13917                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13918                         strcpy(tp->board_part_number, "BCM57766");
13919                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13920                         strcpy(tp->board_part_number, "BCM57782");
13921                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13922                         strcpy(tp->board_part_number, "BCM57786");
13923                 else
13924                         goto nomatch;
13925         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13926                 strcpy(tp->board_part_number, "BCM95906");
13927         } else {
13928 nomatch:
13929                 strcpy(tp->board_part_number, "none");
13930         }
13931 }
13932
13933 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13934 {
13935         u32 val;
13936
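              /* A valid firmware image header has 0b000011 in the top six
               * bits of its first word (i.e. (word & 0xfc000000) ==
               * 0x0c000000) and a zero second word.
               */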
13937         if (tg3_nvram_read(tp, offset, &val) ||
13938             (val & 0xfc000000) != 0x0c000000 ||
13939             tg3_nvram_read(tp, offset + 4, &val) ||
13940             val != 0)
13941                 return 0;
13942
13943         return 1;
13944 }
13945
13946 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13947 {
13948         u32 val, offset, start, ver_offset;
13949         int i, dst_off;
13950         bool newver = false;
13951
13952         if (tg3_nvram_read(tp, 0xc, &offset) ||
13953             tg3_nvram_read(tp, 0x4, &start))
13954                 return;
13955
13956         offset = tg3_nvram_logical_addr(tp, offset);
13957
13958         if (tg3_nvram_read(tp, offset, &val))
13959                 return;
13960
13961         if ((val & 0xfc000000) == 0x0c000000) {
13962                 if (tg3_nvram_read(tp, offset + 4, &val))
13963                         return;
13964
13965                 if (val == 0)
13966                         newver = true;
13967         }
13968
13969         dst_off = strlen(tp->fw_ver);
13970
13971         if (newver) {
13972                 if (TG3_VER_SIZE - dst_off < 16 ||
13973                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13974                         return;
13975
13976                 offset = offset + ver_offset - start;
13977                 for (i = 0; i < 16; i += 4) {
13978                         __be32 v;
13979                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13980                                 return;
13981
13982                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13983                 }
13984         } else {
13985                 u32 major, minor;
13986
13987                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13988                         return;
13989
13990                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13991                         TG3_NVM_BCVER_MAJSFT;
13992                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13993                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13994                          "v%d.%02d", major, minor);
13995         }
13996 }
13997
13998 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13999 {
14000         u32 val, major, minor;
14001
14002         /* Use native endian representation */
14003         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14004                 return;
14005
14006         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14007                 TG3_NVM_HWSB_CFG1_MAJSFT;
14008         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14009                 TG3_NVM_HWSB_CFG1_MINSFT;
14010
14011         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14012 }
14013
14014 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
14015 {
14016         u32 offset, major, minor, build;
14017
14018         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14019
14020         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14021                 return;
14022
14023         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14024         case TG3_EEPROM_SB_REVISION_0:
14025                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14026                 break;
14027         case TG3_EEPROM_SB_REVISION_2:
14028                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14029                 break;
14030         case TG3_EEPROM_SB_REVISION_3:
14031                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14032                 break;
14033         case TG3_EEPROM_SB_REVISION_4:
14034                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14035                 break;
14036         case TG3_EEPROM_SB_REVISION_5:
14037                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14038                 break;
14039         case TG3_EEPROM_SB_REVISION_6:
14040                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14041                 break;
14042         default:
14043                 return;
14044         }
14045
14046         if (tg3_nvram_read(tp, offset, &val))
14047                 return;
14048
14049         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14050                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14051         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14052                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14053         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14054
14055         if (minor > 99 || build > 26)
14056                 return;
14057
14058         offset = strlen(tp->fw_ver);
14059         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14060                  " v%d.%02d", major, minor);
14061
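              /* Builds 1 through 26 are encoded as a single trailing
               * letter, 'a' for build 1 up to 'z' for build 26.
               */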
14062         if (build > 0) {
14063                 offset = strlen(tp->fw_ver);
14064                 if (offset < TG3_VER_SIZE - 1)
14065                         tp->fw_ver[offset] = 'a' + build - 1;
14066         }
14067 }
14068
14069 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
14070 {
14071         u32 val, offset, start;
14072         int i, vlen;
14073
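              /* Scan the NVRAM directory for the ASF initialization entry,
               * follow it to the management firmware image, and append
               * that image's version string to fw_ver.
               */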
14074         for (offset = TG3_NVM_DIR_START;
14075              offset < TG3_NVM_DIR_END;
14076              offset += TG3_NVM_DIRENT_SIZE) {
14077                 if (tg3_nvram_read(tp, offset, &val))
14078                         return;
14079
14080                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14081                         break;
14082         }
14083
14084         if (offset == TG3_NVM_DIR_END)
14085                 return;
14086
14087         if (!tg3_flag(tp, 5705_PLUS))
14088                 start = 0x08000000;
14089         else if (tg3_nvram_read(tp, offset - 4, &start))
14090                 return;
14091
14092         if (tg3_nvram_read(tp, offset + 4, &offset) ||
14093             !tg3_fw_img_is_valid(tp, offset) ||
14094             tg3_nvram_read(tp, offset + 8, &val))
14095                 return;
14096
14097         offset += val - start;
14098
14099         vlen = strlen(tp->fw_ver);
14100
14101         tp->fw_ver[vlen++] = ',';
14102         tp->fw_ver[vlen++] = ' ';
14103
14104         for (i = 0; i < 4; i++) {
14105                 __be32 v;
14106                 if (tg3_nvram_read_be32(tp, offset, &v))
14107                         return;
14108
14109                 offset += sizeof(v);
14110
14111                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14112                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14113                         break;
14114                 }
14115
14116                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14117                 vlen += sizeof(v);
14118         }
14119 }
14120
14121 static void __devinit tg3_probe_ncsi(struct tg3 *tp)
14122 {
14123         u32 apedata;
14124
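              /* If the APE management processor is up and its firmware
               * advertises NCSI, note that so tg3_read_dash_ver() labels
               * the version string "NCSI" instead of "DASH".
               */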
14125         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14126         if (apedata != APE_SEG_SIG_MAGIC)
14127                 return;
14128
14129         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14130         if (!(apedata & APE_FW_STATUS_READY))
14131                 return;
14132
14133         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14134                 tg3_flag_set(tp, APE_HAS_NCSI);
14135 }
14136
14137 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14138 {
14139         int vlen;
14140         u32 apedata;
14141         char *fwtype;
14142
14143         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14144
14145         if (tg3_flag(tp, APE_HAS_NCSI))
14146                 fwtype = "NCSI";
14147         else
14148                 fwtype = "DASH";
14149
14150         vlen = strlen(tp->fw_ver);
14151
14152         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14153                  fwtype,
14154                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14155                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14156                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14157                  (apedata & APE_FW_VERSION_BLDMSK));
14158 }
14159
14160 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
14161 {
14162         u32 val;
14163         bool vpd_vers = false;
14164
14165         if (tp->fw_ver[0] != 0)
14166                 vpd_vers = true;
14167
14168         if (tg3_flag(tp, NO_NVRAM)) {
14169                 strcat(tp->fw_ver, "sb");
14170                 return;
14171         }
14172
14173         if (tg3_nvram_read(tp, 0, &val))
14174                 return;
14175
14176         if (val == TG3_EEPROM_MAGIC)
14177                 tg3_read_bc_ver(tp);
14178         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14179                 tg3_read_sb_ver(tp, val);
14180         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14181                 tg3_read_hwsb_ver(tp);
14182
14183         if (tg3_flag(tp, ENABLE_ASF)) {
14184                 if (tg3_flag(tp, ENABLE_APE)) {
14185                         tg3_probe_ncsi(tp);
14186                         if (!vpd_vers)
14187                                 tg3_read_dash_ver(tp);
14188                 } else if (!vpd_vers) {
14189                         tg3_read_mgmtfw_ver(tp);
14190                 }
14191         }
14192
14193         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14194 }
14195
14196 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14197 {
14198         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14199                 return TG3_RX_RET_MAX_SIZE_5717;
14200         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14201                 return TG3_RX_RET_MAX_SIZE_5700;
14202         else
14203                 return TG3_RX_RET_MAX_SIZE_5705;
14204 }
14205
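      /* Host bridges known to reorder posted writes to the mailbox
       * registers; matching systems get the MBOX_WRITE_REORDER workaround
       * set up in tg3_get_invariants() below.
       */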
14206 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14207         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14208         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14209         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14210         { },
14211 };
14212
14213 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14214 {
14215         struct pci_dev *peer;
14216         unsigned int func, devnr = tp->pdev->devfn & ~7;
14217
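              /* A PCI devfn packs the device and function numbers as
               * (device << 3) | function, so masking off the low three
               * bits yields function 0 of our own device.  E.g. a
               * hypothetical devfn of 0x0b (device 1, function 3) gives
               * devnr 0x08, and the loop below probes functions 0-7 of
               * device 1.
               */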
14218         for (func = 0; func < 8; func++) {
14219                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14220                 if (peer && peer != tp->pdev)
14221                         break;
14222                 pci_dev_put(peer);
14223         }
14224         /* The 5704 can be configured in single-port mode; in that
14225          * case there is no peer, so point peer back at tp->pdev.
14226          */
14227         if (!peer) {
14228                 peer = tp->pdev;
14229                 return peer;
14230         }
14231
14232         /*
14233          * We don't need to keep the refcount elevated; there's no way
14234          * to remove one half of this device without removing the other.
14235          */
14236         pci_dev_put(peer);
14237
14238         return peer;
14239 }
14240
14241 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14242 {
14243         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
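              /* Newer parts report a placeholder revision here and expose
               * the real one in a product ID config register, selected
               * below by device ID.
               */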
14244         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14245                 u32 reg;
14246
14247                 /* All devices that use the alternate
14248                  * ASIC REV location have a CPMU.
14249                  */
14250                 tg3_flag_set(tp, CPMU_PRESENT);
14251
14252                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14253                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14254                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14255                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14256                         reg = TG3PCI_GEN2_PRODID_ASICREV;
14257                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14258                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14259                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14260                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14261                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14262                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14263                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14264                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14265                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14266                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14267                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14268                 else
14269                         reg = TG3PCI_PRODID_ASICREV;
14270
14271                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14272         }
14273
14274         /* Wrong chip ID in 5752 A0. This code can be removed later
14275          * as A0 is not in production.
14276          */
14277         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14278                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14279
14280         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14281             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14282             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14283                 tg3_flag_set(tp, 5717_PLUS);
14284
14285         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14286             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14287                 tg3_flag_set(tp, 57765_CLASS);
14288
14289         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14290                 tg3_flag_set(tp, 57765_PLUS);
14291
14292         /* Intentionally exclude ASIC_REV_5906 */
14293         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14294             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14295             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14296             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14297             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14298             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14299             tg3_flag(tp, 57765_PLUS))
14300                 tg3_flag_set(tp, 5755_PLUS);
14301
14302         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14303             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14304                 tg3_flag_set(tp, 5780_CLASS);
14305
14306         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14307             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14308             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14309             tg3_flag(tp, 5755_PLUS) ||
14310             tg3_flag(tp, 5780_CLASS))
14311                 tg3_flag_set(tp, 5750_PLUS);
14312
14313         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14314             tg3_flag(tp, 5750_PLUS))
14315                 tg3_flag_set(tp, 5705_PLUS);
14316 }
14317
14318 static int __devinit tg3_get_invariants(struct tg3 *tp)
14319 {
14320         u32 misc_ctrl_reg;
14321         u32 pci_state_reg, grc_misc_cfg;
14322         u32 val;
14323         u16 pci_cmd;
14324         int err;
14325
14326         /* Force memory write invalidate off.  If we leave it on,
14327          * then on 5700_BX chips we have to enable a workaround.
14328          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14329          * to match the cacheline size.  The Broadcom driver has this
14330          * workaround but turns MWI off all the time, so it never gets
14331          * used.  This seems to suggest that the workaround is insufficient.
14332          */
14333         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14334         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14335         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14336
14337         /* Important! -- Make sure register accesses are byteswapped
14338          * correctly.  Also, for those chips that require it, make
14339          * sure that indirect register accesses are enabled before
14340          * the first operation.
14341          */
14342         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14343                               &misc_ctrl_reg);
14344         tp->misc_host_ctrl |= (misc_ctrl_reg &
14345                                MISC_HOST_CTRL_CHIPREV);
14346         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14347                                tp->misc_host_ctrl);
14348
14349         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14350
14351         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14352          * we need to disable memory and use config. cycles
14353          * only to access all registers. The 5702/03 chips
14354          * can mistakenly decode the special cycles from the
14355          * ICH chipsets as memory write cycles, causing corruption
14356          * of register and memory space. Only certain ICH bridges
14357          * will drive special cycles with non-zero data during the
14358          * address phase which can fall within the 5703's address
14359          * range. This is not an ICH bug as the PCI spec allows
14360          * non-zero address during special cycles. However, only
14361          * these ICH bridges are known to drive non-zero addresses
14362          * during special cycles.
14363          *
14364          * Since special cycles do not cross PCI bridges, we only
14365          * enable this workaround if the 5703 is on the secondary
14366          * bus of these ICH bridges.
14367          */
14368         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14369             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14370                 static struct tg3_dev_id {
14371                         u32     vendor;
14372                         u32     device;
14373                         u32     rev;
14374                 } ich_chipsets[] = {
14375                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14376                           PCI_ANY_ID },
14377                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14378                           PCI_ANY_ID },
14379                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14380                           0xa },
14381                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14382                           PCI_ANY_ID },
14383                         { },
14384                 };
14385                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14386                 struct pci_dev *bridge = NULL;
14387
14388                 while (pci_id->vendor != 0) {
14389                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14390                                                 bridge);
14391                         if (!bridge) {
14392                                 pci_id++;
14393                                 continue;
14394                         }
14395                         if (pci_id->rev != PCI_ANY_ID) {
14396                                 if (bridge->revision > pci_id->rev)
14397                                         continue;
14398                         }
14399                         if (bridge->subordinate &&
14400                             (bridge->subordinate->number ==
14401                              tp->pdev->bus->number)) {
14402                                 tg3_flag_set(tp, ICH_WORKAROUND);
14403                                 pci_dev_put(bridge);
14404                                 break;
14405                         }
14406                 }
14407         }
14408
14409         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14410                 static struct tg3_dev_id {
14411                         u32     vendor;
14412                         u32     device;
14413                 } bridge_chipsets[] = {
14414                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14415                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14416                         { },
14417                 };
14418                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14419                 struct pci_dev *bridge = NULL;
14420
14421                 while (pci_id->vendor != 0) {
14422                         bridge = pci_get_device(pci_id->vendor,
14423                                                 pci_id->device,
14424                                                 bridge);
14425                         if (!bridge) {
14426                                 pci_id++;
14427                                 continue;
14428                         }
14429                         if (bridge->subordinate &&
14430                             (bridge->subordinate->number <=
14431                              tp->pdev->bus->number) &&
14432                             (bridge->subordinate->busn_res.end >=
14433                              tp->pdev->bus->number)) {
14434                                 tg3_flag_set(tp, 5701_DMA_BUG);
14435                                 pci_dev_put(bridge);
14436                                 break;
14437                         }
14438                 }
14439         }
14440
14441         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14442          * DMA addresses > 40-bit. This bridge may have additional
14443          * 57xx devices behind it, in some 4-port NIC designs for example.
14444          * Any tg3 device found behind the bridge will also need the 40-bit
14445          * DMA workaround.
14446          */
14447         if (tg3_flag(tp, 5780_CLASS)) {
14448                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14449                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14450         } else {
14451                 struct pci_dev *bridge = NULL;
14452
14453                 do {
14454                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14455                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14456                                                 bridge);
14457                         if (bridge && bridge->subordinate &&
14458                             (bridge->subordinate->number <=
14459                              tp->pdev->bus->number) &&
14460                             (bridge->subordinate->busn_res.end >=
14461                              tp->pdev->bus->number)) {
14462                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14463                                 pci_dev_put(bridge);
14464                                 break;
14465                         }
14466                 } while (bridge);
14467         }
14468
14469         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14470             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14471                 tp->pdev_peer = tg3_find_peer(tp);
14472
14473         /* Determine TSO capabilities */
14474         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14475                 ; /* Do nothing. HW bug. */
14476         else if (tg3_flag(tp, 57765_PLUS))
14477                 tg3_flag_set(tp, HW_TSO_3);
14478         else if (tg3_flag(tp, 5755_PLUS) ||
14479                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14480                 tg3_flag_set(tp, HW_TSO_2);
14481         else if (tg3_flag(tp, 5750_PLUS)) {
14482                 tg3_flag_set(tp, HW_TSO_1);
14483                 tg3_flag_set(tp, TSO_BUG);
14484                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14485                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14486                         tg3_flag_clear(tp, TSO_BUG);
14487         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14488                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14489                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14490                 tg3_flag_set(tp, TSO_BUG);
14491                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14492                         tp->fw_needed = FIRMWARE_TG3TSO5;
14493                 else
14494                         tp->fw_needed = FIRMWARE_TG3TSO;
14495         }
14496
14497         /* Selectively allow TSO based on operating conditions */
14498         if (tg3_flag(tp, HW_TSO_1) ||
14499             tg3_flag(tp, HW_TSO_2) ||
14500             tg3_flag(tp, HW_TSO_3) ||
14501             tp->fw_needed) {
14502                 /* For firmware TSO, assume ASF is disabled.
14503                  * We'll disable TSO later if we discover ASF
14504                  * is enabled in tg3_get_eeprom_hw_cfg().
14505                  */
14506                 tg3_flag_set(tp, TSO_CAPABLE);
14507         } else {
14508                 tg3_flag_clear(tp, TSO_CAPABLE);
14509                 tg3_flag_clear(tp, TSO_BUG);
14510                 tp->fw_needed = NULL;
14511         }
14512
14513         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14514                 tp->fw_needed = FIRMWARE_TG3;
14515
14516         tp->irq_max = 1;
14517
14518         if (tg3_flag(tp, 5750_PLUS)) {
14519                 tg3_flag_set(tp, SUPPORT_MSI);
14520                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14521                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14522                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14523                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14524                      tp->pdev_peer == tp->pdev))
14525                         tg3_flag_clear(tp, SUPPORT_MSI);
14526
14527                 if (tg3_flag(tp, 5755_PLUS) ||
14528                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14529                         tg3_flag_set(tp, 1SHOT_MSI);
14530                 }
14531
14532                 if (tg3_flag(tp, 57765_PLUS)) {
14533                         tg3_flag_set(tp, SUPPORT_MSIX);
14534                         tp->irq_max = TG3_IRQ_MAX_VECS;
14535                         tg3_rss_init_dflt_indir_tbl(tp);
14536                 }
14537         }
14538
14539         if (tg3_flag(tp, 5755_PLUS) ||
14540             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14541                 tg3_flag_set(tp, SHORT_DMA_BUG);
14542
14543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14544                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14545
14546         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14547             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14548             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14549                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14550
14551         if (tg3_flag(tp, 57765_PLUS) &&
14552             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14553                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14554
14555         if (!tg3_flag(tp, 5705_PLUS) ||
14556             tg3_flag(tp, 5780_CLASS) ||
14557             tg3_flag(tp, USE_JUMBO_BDFLAG))
14558                 tg3_flag_set(tp, JUMBO_CAPABLE);
14559
14560         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14561                               &pci_state_reg);
14562
14563         if (pci_is_pcie(tp->pdev)) {
14564                 u16 lnkctl;
14565
14566                 tg3_flag_set(tp, PCI_EXPRESS);
14567
14568                 pci_read_config_word(tp->pdev,
14569                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14570                                      &lnkctl);
14571                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14572                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14573                             ASIC_REV_5906) {
14574                                 tg3_flag_clear(tp, HW_TSO_2);
14575                                 tg3_flag_clear(tp, TSO_CAPABLE);
14576                         }
14577                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14578                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14579                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14580                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14581                                 tg3_flag_set(tp, CLKREQ_BUG);
14582                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14583                         tg3_flag_set(tp, L1PLLPD_EN);
14584                 }
14585         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14586                 /* BCM5785 devices are effectively PCIe devices, and should
14587                  * follow PCIe codepaths, but do not have a PCIe capabilities
14588                  * section.
14589                  */
14590                 tg3_flag_set(tp, PCI_EXPRESS);
14591         } else if (!tg3_flag(tp, 5705_PLUS) ||
14592                    tg3_flag(tp, 5780_CLASS)) {
14593                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14594                 if (!tp->pcix_cap) {
14595                         dev_err(&tp->pdev->dev,
14596                                 "Cannot find PCI-X capability, aborting\n");
14597                         return -EIO;
14598                 }
14599
14600                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14601                         tg3_flag_set(tp, PCIX_MODE);
14602         }
14603
14604         /* If we have an AMD 762 or VIA K8T800 chipset, write
14605          * reordering to the mailbox registers done by the host
14606          * controller can cause major troubles.  We read back from
14607          * every mailbox register write to force the writes to be
14608          * posted to the chip in order.
14609          */
14610         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14611             !tg3_flag(tp, PCI_EXPRESS))
14612                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14613
14614         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14615                              &tp->pci_cacheline_sz);
14616         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14617                              &tp->pci_lat_timer);
14618         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14619             tp->pci_lat_timer < 64) {
14620                 tp->pci_lat_timer = 64;
14621                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14622                                       tp->pci_lat_timer);
14623         }
14624
14625         /* Important! -- The PCI-X hw workaround decision must be
14626          * made before the first MMIO register access.
14627          */
14628         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14629                 /* 5700 BX chips need to have their TX producer index
14630                  * mailboxes written twice to workaround a bug.
14631                  */
14632                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14633
14634                 /* If we are in PCI-X mode, enable register write workaround.
14635                  *
14636                  * The workaround is to use indirect register accesses
14637                  * for all chip writes not to mailbox registers.
14638                  */
14639                 if (tg3_flag(tp, PCIX_MODE)) {
14640                         u32 pm_reg;
14641
14642                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14643
14644                         /* The chip can have its power management PCI config
14645                          * space registers clobbered due to this bug.
14646                          * So explicitly force the chip into D0 here.
14647                          */
14648                         pci_read_config_dword(tp->pdev,
14649                                               tp->pm_cap + PCI_PM_CTRL,
14650                                               &pm_reg);
14651                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14652                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14653                         pci_write_config_dword(tp->pdev,
14654                                                tp->pm_cap + PCI_PM_CTRL,
14655                                                pm_reg);
14656
14657                         /* Also, force SERR#/PERR# in PCI command. */
14658                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14659                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14660                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14661                 }
14662         }
14663
14664         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14665                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14666         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14667                 tg3_flag_set(tp, PCI_32BIT);
14668
14669         /* Chip-specific fixup from Broadcom driver */
14670         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14671             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14672                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14673                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14674         }
14675
14676         /* Default fast path register access methods */
14677         tp->read32 = tg3_read32;
14678         tp->write32 = tg3_write32;
14679         tp->read32_mbox = tg3_read32;
14680         tp->write32_mbox = tg3_write32;
14681         tp->write32_tx_mbox = tg3_write32;
14682         tp->write32_rx_mbox = tg3_write32;
14683
14684         /* Various workaround register access methods */
14685         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14686                 tp->write32 = tg3_write_indirect_reg32;
14687         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14688                  (tg3_flag(tp, PCI_EXPRESS) &&
14689                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14690                 /*
14691                  * Back to back register writes can cause problems on these
14692                  * chips, the workaround is to read back all reg writes
14693                  * except those to mailbox regs.
14694                  *
14695                  * See tg3_write_indirect_reg32().
14696                  */
14697                 tp->write32 = tg3_write_flush_reg32;
14698         }
14699
14700         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14701                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14702                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14703                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14704         }
14705
14706         if (tg3_flag(tp, ICH_WORKAROUND)) {
14707                 tp->read32 = tg3_read_indirect_reg32;
14708                 tp->write32 = tg3_write_indirect_reg32;
14709                 tp->read32_mbox = tg3_read_indirect_mbox;
14710                 tp->write32_mbox = tg3_write_indirect_mbox;
14711                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14712                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14713
14714                 iounmap(tp->regs);
14715                 tp->regs = NULL;
14716
14717                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14718                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14719                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14720         }
14721         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14722                 tp->read32_mbox = tg3_read32_mbox_5906;
14723                 tp->write32_mbox = tg3_write32_mbox_5906;
14724                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14725                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14726         }
14727
14728         if (tp->write32 == tg3_write_indirect_reg32 ||
14729             (tg3_flag(tp, PCIX_MODE) &&
14730              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14731               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14732                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14733
14734         /* The memory arbiter has to be enabled in order for SRAM accesses
14735          * to succeed.  Normally on powerup the tg3 chip firmware will make
14736          * sure it is enabled, but other entities such as system netboot
14737          * code might disable it.
14738          */
14739         val = tr32(MEMARB_MODE);
14740         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14741
14742         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14743         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14744             tg3_flag(tp, 5780_CLASS)) {
14745                 if (tg3_flag(tp, PCIX_MODE)) {
14746                         pci_read_config_dword(tp->pdev,
14747                                               tp->pcix_cap + PCI_X_STATUS,
14748                                               &val);
14749                         tp->pci_fn = val & 0x7;
14750                 }
14751         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14752                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14753                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14754                     NIC_SRAM_CPMUSTAT_SIG) {
14755                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14756                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14757                 }
14758         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14759                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14760                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14761                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14762                     NIC_SRAM_CPMUSTAT_SIG) {
14763                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14764                                      TG3_CPMU_STATUS_FSHFT_5719;
14765                 }
14766         }
14767
14768         /* Get eeprom hw config before calling tg3_set_power_state().
14769          * In particular, the TG3_FLAG_IS_NIC flag must be
14770          * determined before calling tg3_set_power_state() so that
14771          * we know whether or not to switch out of Vaux power.
14772          * When the flag is set, it means that GPIO1 is used for eeprom
14773          * write protect and also implies that it is a LOM where GPIOs
14774          * are not used to switch power.
14775          */
14776         tg3_get_eeprom_hw_cfg(tp);
14777
14778         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14779                 tg3_flag_clear(tp, TSO_CAPABLE);
14780                 tg3_flag_clear(tp, TSO_BUG);
14781                 tp->fw_needed = NULL;
14782         }
14783
14784         if (tg3_flag(tp, ENABLE_APE)) {
14785                 /* Allow reads and writes to the
14786                  * APE register and memory space.
14787                  */
14788                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14789                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14790                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14791                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14792                                        pci_state_reg);
14793
14794                 tg3_ape_lock_init(tp);
14795         }
14796
14797         /* Set up tp->grc_local_ctrl before calling
14798          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14799          * will bring 5700's external PHY out of reset.
14800          * It is also used as eeprom write protect on LOMs.
14801          */
14802         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14803         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14804             tg3_flag(tp, EEPROM_WRITE_PROT))
14805                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14806                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14807         /* Unused GPIO3 must be driven as output on 5752 because there
14808          * are no pull-up resistors on unused GPIO pins.
14809          */
14810         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14811                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14812
14813         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14814             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14815             tg3_flag(tp, 57765_CLASS))
14816                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14817
14818         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14819             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14820                 /* Turn off the debug UART. */
14821                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14822                 if (tg3_flag(tp, IS_NIC))
14823                         /* Keep VMain power. */
14824                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14825                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14826         }
14827
14828         /* Switch out of Vaux if it is a NIC */
14829         tg3_pwrsrc_switch_to_vmain(tp);
14830
14831         /* Derive initial jumbo mode from MTU assigned in
14832          * ether_setup() via the alloc_etherdev() call
14833          */
14834         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14835                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14836
14837         /* Determine WakeOnLan speed to use. */
14838         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14839             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14840             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14841             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14842                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14843         } else {
14844                 tg3_flag_set(tp, WOL_SPEED_100MB);
14845         }
14846
14847         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14848                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14849
14850         /* A few boards don't want the Ethernet@WireSpeed phy feature */
14851         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14852             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14853              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14854              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14855             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14856             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14857                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14858
14859         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14860             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14861                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14862         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14863                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14864
14865         if (tg3_flag(tp, 5705_PLUS) &&
14866             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14867             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14868             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14869             !tg3_flag(tp, 57765_PLUS)) {
14870                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14871                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14872                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14873                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14874                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14875                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14876                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14877                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14878                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14879                 } else
14880                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14881         }
14882
14883         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14884             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14885                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14886                 if (tp->phy_otp == 0)
14887                         tp->phy_otp = TG3_OTP_DEFAULT;
14888         }
14889
14890         if (tg3_flag(tp, CPMU_PRESENT))
14891                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14892         else
14893                 tp->mi_mode = MAC_MI_MODE_BASE;
14894
14895         tp->coalesce_mode = 0;
14896         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14897             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14898                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14899
14900         /* Set these bits to enable statistics workaround. */
14901         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14902             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14903             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14904                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14905                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14906         }
14907
14908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14909             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14910                 tg3_flag_set(tp, USE_PHYLIB);
14911
14912         err = tg3_mdio_init(tp);
14913         if (err)
14914                 return err;
14915
14916         /* Initialize data/descriptor byte/word swapping. */
14917         val = tr32(GRC_MODE);
14918         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14919                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14920                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14921                         GRC_MODE_B2HRX_ENABLE |
14922                         GRC_MODE_HTX2B_ENABLE |
14923                         GRC_MODE_HOST_STACKUP);
14924         else
14925                 val &= GRC_MODE_HOST_STACKUP;
14926
14927         tw32(GRC_MODE, val | tp->grc_mode);
14928
14929         tg3_switch_clocks(tp);
14930
14931         /* Clear this out for sanity. */
14932         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14933
14934         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14935                               &pci_state_reg);
14936         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14937             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14938                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14939
14940                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14941                     chiprevid == CHIPREV_ID_5701_B0 ||
14942                     chiprevid == CHIPREV_ID_5701_B2 ||
14943                     chiprevid == CHIPREV_ID_5701_B5) {
14944                         void __iomem *sram_base;
14945
14946                         /* Write some dummy words into the SRAM status block
14947                          * area, see if it reads back correctly.  If the return
14948                          * value is bad, force enable the PCIX workaround.
14949                          */
14950                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14951
14952                         writel(0x00000000, sram_base);
14953                         writel(0x00000000, sram_base + 4);
14954                         writel(0xffffffff, sram_base + 4);
14955                         if (readl(sram_base) != 0x00000000)
14956                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14957                 }
14958         }
14959
14960         udelay(50);
14961         tg3_nvram_init(tp);
14962
14963         grc_misc_cfg = tr32(GRC_MISC_CFG);
14964         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14965
14966         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14967             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14968              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14969                 tg3_flag_set(tp, IS_5788);
14970
14971         if (!tg3_flag(tp, IS_5788) &&
14972             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14973                 tg3_flag_set(tp, TAGGED_STATUS);
14974         if (tg3_flag(tp, TAGGED_STATUS)) {
14975                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14976                                       HOSTCC_MODE_CLRTICK_TXBD);
14977
14978                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14979                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14980                                        tp->misc_host_ctrl);
14981         }
14982
14983         /* Preserve the APE MAC_MODE bits */
14984         if (tg3_flag(tp, ENABLE_APE))
14985                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14986         else
14987                 tp->mac_mode = 0;
14988
14989         /* these are limited to 10/100 only */
14990         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14991              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14992             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14993              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14994              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14995               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14996               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14997             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14998              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14999               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
15000               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
15001             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
15002             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15003             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15004             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15005                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15006
15007         err = tg3_phy_probe(tp);
15008         if (err) {
15009                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15010                 /* ... but do not return immediately ... */
15011                 tg3_mdio_fini(tp);
15012         }
15013
15014         tg3_read_vpd(tp);
15015         tg3_read_fw_ver(tp);
15016
15017         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15018                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15019         } else {
15020                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15021                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15022                 else
15023                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15024         }
15025
15026         /* 5700 {AX,BX} chips have a broken status block link
15027          * change bit implementation, so we must use the
15028          * status register in those cases.
15029          */
15030         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15031                 tg3_flag_set(tp, USE_LINKCHG_REG);
15032         else
15033                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15034
15035         /* The led_ctrl is set during tg3_phy_probe; here we might
15036          * have to force the link status polling mechanism based
15037          * upon subsystem IDs.
15038          */
15039         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15040             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15041             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15042                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15043                 tg3_flag_set(tp, USE_LINKCHG_REG);
15044         }
15045
15046         /* For all SERDES we poll the MAC status register. */
15047         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15048                 tg3_flag_set(tp, POLL_SERDES);
15049         else
15050                 tg3_flag_clear(tp, POLL_SERDES);
15051
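              /* The 5701 in PCI-X mode apparently cannot DMA receive data
               * to 2-byte-aligned buffers, so drop the NET_IP_ALIGN offset
               * there; on architectures without efficient unaligned
               * access, force every packet through the copy path instead
               * (a copy threshold of ~(u16)0 means "always copy").
               */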
15052         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15053         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15054         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15055             tg3_flag(tp, PCIX_MODE)) {
15056                 tp->rx_offset = NET_SKB_PAD;
15057 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15058                 tp->rx_copy_thresh = ~(u16)0;
15059 #endif
15060         }
15061
15062         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15063         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15064         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15065
15066         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15067
15068         /* Increment the rx prod index on the rx std ring by at most
15069          * 8 for these chips to work around hw errata.
15070          */
15071         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15072             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15073             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15074                 tp->rx_std_max_post = 8;
15075
15076         if (tg3_flag(tp, ASPM_WORKAROUND))
15077                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15078                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15079
15080         return err;
15081 }
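
#if 0	/* Illustrative sketch, not driver code: the ring masks set up
	 * above assume the ring sizes are powers of two, so a producer
	 * or consumer index wraps with a simple AND.
	 * tg3_example_ring_advance() is a hypothetical helper.
	 */
static u32 tg3_example_ring_advance(u32 idx, u32 mask)
{
	return (idx + 1) & mask;	/* e.g. (511 + 1) & 511 == 0 */
}
#endif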
15082
15083 #ifdef CONFIG_SPARC
15084 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15085 {
15086         struct net_device *dev = tp->dev;
15087         struct pci_dev *pdev = tp->pdev;
15088         struct device_node *dp = pci_device_to_OF_node(pdev);
15089         const unsigned char *addr;
15090         int len;
15091
15092         addr = of_get_property(dp, "local-mac-address", &len);
15093         if (addr && len == 6) {
15094                 memcpy(dev->dev_addr, addr, 6);
15095                 memcpy(dev->perm_addr, dev->dev_addr, 6);
15096                 return 0;
15097         }
15098         return -ENODEV;
15099 }
15100
15101 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15102 {
15103         struct net_device *dev = tp->dev;
15104
15105         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15106         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15107         return 0;
15108 }
15109 #endif
15110
15111 static int __devinit tg3_get_device_address(struct tg3 *tp)
15112 {
15113         struct net_device *dev = tp->dev;
15114         u32 hi, lo, mac_offset;
15115         int addr_ok = 0;
15116
15117 #ifdef CONFIG_SPARC
15118         if (!tg3_get_macaddr_sparc(tp))
15119                 return 0;
15120 #endif
15121
15122         mac_offset = 0x7c;
15123         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15124             tg3_flag(tp, 5780_CLASS)) {
15125                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15126                         mac_offset = 0xcc;
15127                 if (tg3_nvram_lock(tp))
15128                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15129                 else
15130                         tg3_nvram_unlock(tp);
15131         } else if (tg3_flag(tp, 5717_PLUS)) {
15132                 if (tp->pci_fn & 1)
15133                         mac_offset = 0xcc;
15134                 if (tp->pci_fn > 1)
15135                         mac_offset += 0x18c;
15136         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15137                 mac_offset = 0x10;
15138
15139         /* First try to get it from MAC address mailbox. */
15140         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15141         if ((hi >> 16) == 0x484b) {
15142                 dev->dev_addr[0] = (hi >>  8) & 0xff;
15143                 dev->dev_addr[1] = (hi >>  0) & 0xff;
15144
15145                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15146                 dev->dev_addr[2] = (lo >> 24) & 0xff;
15147                 dev->dev_addr[3] = (lo >> 16) & 0xff;
15148                 dev->dev_addr[4] = (lo >>  8) & 0xff;
15149                 dev->dev_addr[5] = (lo >>  0) & 0xff;
15150
15151                 /* Some old bootcode may report a 0 MAC address in SRAM */
15152                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15153         }
15154         if (!addr_ok) {
15155                 /* Next, try NVRAM. */
15156                 if (!tg3_flag(tp, NO_NVRAM) &&
15157                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15158                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15159                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15160                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15161                 }
15162                 /* Finally just fetch it out of the MAC control regs. */
15163                 else {
15164                         hi = tr32(MAC_ADDR_0_HIGH);
15165                         lo = tr32(MAC_ADDR_0_LOW);
15166
15167                         dev->dev_addr[5] = lo & 0xff;
15168                         dev->dev_addr[4] = (lo >> 8) & 0xff;
15169                         dev->dev_addr[3] = (lo >> 16) & 0xff;
15170                         dev->dev_addr[2] = (lo >> 24) & 0xff;
15171                         dev->dev_addr[1] = hi & 0xff;
15172                         dev->dev_addr[0] = (hi >> 8) & 0xff;
15173                 }
15174         }
15175
15176         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15177 #ifdef CONFIG_SPARC
15178                 if (!tg3_get_default_macaddr_sparc(tp))
15179                         return 0;
15180 #endif
15181                 return -EINVAL;
15182         }
15183         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15184         return 0;
15185 }
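
#if 0	/* Illustrative sketch, not driver code: how the two SRAM
	 * mailbox words decode into a MAC address.  The upper 16 bits
	 * of the HIGH word carry the 0x484b ("HK") bootcode signature
	 * checked above; the remaining six bytes are the address, most
	 * significant byte first.  tg3_example_decode_mac() is a
	 * hypothetical helper.
	 */
static void tg3_example_decode_mac(u32 hi, u32 lo, u8 *addr)
{
	/* hi = 0x484b0011, lo = 0x22334455  ->  00:11:22:33:44:55 */
	addr[0] = (hi >>  8) & 0xff;
	addr[1] = (hi >>  0) & 0xff;
	addr[2] = (lo >> 24) & 0xff;
	addr[3] = (lo >> 16) & 0xff;
	addr[4] = (lo >>  8) & 0xff;
	addr[5] = (lo >>  0) & 0xff;
}
#endif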
15186
15187 #define BOUNDARY_SINGLE_CACHELINE       1
15188 #define BOUNDARY_MULTI_CACHELINE        2
15189
15190 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15191 {
15192         int cacheline_size;
15193         u8 byte;
15194         int goal;
15195
15196         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15197         if (byte == 0)
15198                 cacheline_size = 1024;
15199         else
15200                 cacheline_size = (int) byte * 4;
15201
15202         /* On 5703 and later chips, the boundary bits have no
15203          * effect.
15204          */
15205         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15206             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15207             !tg3_flag(tp, PCI_EXPRESS))
15208                 goto out;
15209
15210 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15211         goal = BOUNDARY_MULTI_CACHELINE;
15212 #else
15213 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15214         goal = BOUNDARY_SINGLE_CACHELINE;
15215 #else
15216         goal = 0;
15217 #endif
15218 #endif
15219
15220         if (tg3_flag(tp, 57765_PLUS)) {
15221                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15222                 goto out;
15223         }
15224
15225         if (!goal)
15226                 goto out;
15227
15228         /* PCI controllers on most RISC systems tend to disconnect
15229          * when a device tries to burst across a cache-line boundary.
15230          * Therefore, letting tg3 do so just wastes PCI bandwidth.
15231          *
15232          * Unfortunately, for PCI-E there are only limited
15233          * write-side controls for this, and thus for reads
15234          * we will still get the disconnects.  We'll also waste
15235          * these PCI cycles for both read and write for chips
15236          * other than 5700 and 5701 which do not implement the
15237          * boundary bits.
15238          */
15239         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15240                 switch (cacheline_size) {
15241                 case 16:
15242                 case 32:
15243                 case 64:
15244                 case 128:
15245                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15246                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15247                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15248                         } else {
15249                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15250                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15251                         }
15252                         break;
15253
15254                 case 256:
15255                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15256                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15257                         break;
15258
15259                 default:
15260                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15261                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15262                         break;
15263                 }
15264         } else if (tg3_flag(tp, PCI_EXPRESS)) {
15265                 switch (cacheline_size) {
15266                 case 16:
15267                 case 32:
15268                 case 64:
15269                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15270                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15271                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15272                                 break;
15273                         }
15274                         /* fallthrough */
15275                 case 128:
15276                 default:
15277                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15278                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15279                         break;
15280                 }
15281         } else {
15282                 switch (cacheline_size) {
15283                 case 16:
15284                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15285                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15286                                         DMA_RWCTRL_WRITE_BNDRY_16);
15287                                 break;
15288                         }
15289                         /* fallthrough */
15290                 case 32:
15291                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15292                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15293                                         DMA_RWCTRL_WRITE_BNDRY_32);
15294                                 break;
15295                         }
15296                         /* fallthrough */
15297                 case 64:
15298                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15299                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15300                                         DMA_RWCTRL_WRITE_BNDRY_64);
15301                                 break;
15302                         }
15303                         /* fallthrough */
15304                 case 128:
15305                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15306                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15307                                         DMA_RWCTRL_WRITE_BNDRY_128);
15308                                 break;
15309                         }
15310                         /* fallthrough */
15311                 case 256:
15312                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15313                                 DMA_RWCTRL_WRITE_BNDRY_256);
15314                         break;
15315                 case 512:
15316                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15317                                 DMA_RWCTRL_WRITE_BNDRY_512);
15318                         break;
15319                 case 1024:
15320                 default:
15321                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15322                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15323                         break;
15324                 }
15325         }
15326
15327 out:
15328         return val;
15329 }
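
#if 0	/* Worked example, illustrative only: PCI_CACHE_LINE_SIZE is
	 * specified in 32-bit dwords, hence the "* 4" above.  A
	 * register value of 16 therefore means a 64-byte cache line,
	 * which with goal == BOUNDARY_SINGLE_CACHELINE on plain PCI
	 * selects the 64-byte read/write boundary bits.
	 * tg3_example_cacheline_bytes() is a hypothetical helper.
	 */
static int tg3_example_cacheline_bytes(u8 reg)
{
	return reg ? (int)reg * 4 : 1024;	/* 0 falls back to 1024 */
}
#endif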
15330
15331 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15332 {
15333         struct tg3_internal_buffer_desc test_desc;
15334         u32 sram_dma_descs;
15335         int i, ret;
15336
15337         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15338
15339         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15340         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15341         tw32(RDMAC_STATUS, 0);
15342         tw32(WDMAC_STATUS, 0);
15343
15344         tw32(BUFMGR_MODE, 0);
15345         tw32(FTQ_RESET, 0);
15346
15347         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15348         test_desc.addr_lo = buf_dma & 0xffffffff;
15349         test_desc.nic_mbuf = 0x00002100;
15350         test_desc.len = size;
15351
15352         /*
15353          * HP ZX1 systems saw test failures on 5701 cards running at 33MHz
15354          * the *second* time the tg3 driver was loaded after an
15355          * initial scan.
15356          *
15357          * Broadcom tells me:
15358          *   ...the DMA engine is connected to the GRC block and a DMA
15359          *   reset may affect the GRC block in some unpredictable way...
15360          *   The behavior of resets to individual blocks has not been tested.
15361          *
15362          * Broadcom noted the GRC reset will also reset all sub-components.
15363          */
15364         if (to_device) {
15365                 test_desc.cqid_sqid = (13 << 8) | 2;
15366
15367                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15368                 udelay(40);
15369         } else {
15370                 test_desc.cqid_sqid = (16 << 8) | 7;
15371
15372                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15373                 udelay(40);
15374         }
15375         test_desc.flags = 0x00000005;
15376
15377         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15378                 u32 val;
15379
15380                 val = *(((u32 *)&test_desc) + i);
15381                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15382                                        sram_dma_descs + (i * sizeof(u32)));
15383                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15384         }
15385         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15386
15387         if (to_device)
15388                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15389         else
15390                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15391
15392         ret = -ENODEV;
15393         for (i = 0; i < 40; i++) {
15394                 u32 val;
15395
15396                 if (to_device)
15397                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15398                 else
15399                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15400                 if ((val & 0xffff) == sram_dma_descs) {
15401                         ret = 0;
15402                         break;
15403                 }
15404
15405                 udelay(100);
15406         }
15407
15408         return ret;
15409 }
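
#if 0	/* Illustrative sketch, not driver code: the descriptor words
	 * above reach NIC SRAM through the PCI config-space memory
	 * window, one 32-bit word at a time, and the completion poll
	 * gives the DMA roughly 40 * 100us = 4ms to finish.
	 * tg3_example_sram_write() is a hypothetical helper showing
	 * the same window access pattern.
	 */
static void tg3_example_sram_write(struct tg3 *tp, u32 off, u32 val)
{
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	/* Restore the window base, as tg3_do_test_dma() does. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
#endif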
15410
15411 #define TEST_BUFFER_SIZE        0x2000
15412
15413 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15414         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15415         { },
15416 };
15417
15418 static int __devinit tg3_test_dma(struct tg3 *tp)
15419 {
15420         dma_addr_t buf_dma;
15421         u32 *buf, saved_dma_rwctrl;
15422         int ret = 0;
15423
15424         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15425                                  &buf_dma, GFP_KERNEL);
15426         if (!buf) {
15427                 ret = -ENOMEM;
15428                 goto out_nofree;
15429         }
15430
15431         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15432                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15433
15434         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15435
15436         if (tg3_flag(tp, 57765_PLUS))
15437                 goto out;
15438
15439         if (tg3_flag(tp, PCI_EXPRESS)) {
15440                 /* DMA read watermark not used on PCIE */
15441                 tp->dma_rwctrl |= 0x00180000;
15442         } else if (!tg3_flag(tp, PCIX_MODE)) {
15443                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15444                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15445                         tp->dma_rwctrl |= 0x003f0000;
15446                 else
15447                         tp->dma_rwctrl |= 0x003f000f;
15448         } else {
15449                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15450                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15451                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15452                         u32 read_water = 0x7;
15453
15454                         /* If the 5704 is behind the EPB bridge, we can
15455                          * do the less restrictive ONE_DMA workaround for
15456                          * better performance.
15457                          */
15458                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15459                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15460                                 tp->dma_rwctrl |= 0x8000;
15461                         else if (ccval == 0x6 || ccval == 0x7)
15462                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15463
15464                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15465                                 read_water = 4;
15466                         /* Set bit 23 to enable PCIX hw bug fix */
15467                         tp->dma_rwctrl |=
15468                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15469                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15470                                 (1 << 23);
15471                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15472                         /* 5780 always in PCIX mode */
15473                         tp->dma_rwctrl |= 0x00144000;
15474                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15475                         /* 5714 always in PCIX mode */
15476                         tp->dma_rwctrl |= 0x00148000;
15477                 } else {
15478                         tp->dma_rwctrl |= 0x001b000f;
15479                 }
15480         }
15481
15482         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15483             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15484                 tp->dma_rwctrl &= 0xfffffff0;
15485
15486         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15487             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15488                 /* Remove this if it causes problems for some boards. */
15489                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15490
15491                 /* On 5700/5701 chips, we need to set this bit.
15492                  * Otherwise the chip will issue cacheline transactions
15493                  * to streamable DMA memory with not all the byte
15494                  * enables turned on.  This is an error on several
15495                  * RISC PCI controllers, in particular sparc64.
15496                  *
15497                  * On 5703/5704 chips, this bit has been reassigned
15498                  * a different meaning.  In particular, it is used
15499                  * on those chips to enable a PCI-X workaround.
15500                  */
15501                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15502         }
15503
15504         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15505
15506 #if 0
15507         /* Unneeded, already done by tg3_get_invariants.  */
15508         tg3_switch_clocks(tp);
15509 #endif
15510
15511         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15512             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15513                 goto out;
15514
15515         /* It is best to perform the DMA test with maximum write burst size
15516          * to expose the 5700/5701 write DMA bug.
15517          */
15518         saved_dma_rwctrl = tp->dma_rwctrl;
15519         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15520         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15521
15522         while (1) {
15523                 u32 *p = buf, i;
15524
15525                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15526                         p[i] = i;
15527
15528                 /* Send the buffer to the chip. */
15529                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15530                 if (ret) {
15531                         dev_err(&tp->pdev->dev,
15532                                 "%s: Buffer write failed. err = %d\n",
15533                                 __func__, ret);
15534                         break;
15535                 }
15536
15537 #if 0
15538                 /* validate data reached card RAM correctly. */
15539                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15540                         u32 val;
15541                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15542                         if (le32_to_cpu(val) != p[i]) {
15543                                 dev_err(&tp->pdev->dev,
15544                                         "%s: Buffer corrupted on device! "
15545                                         "(%d != %d)\n", __func__, val, i);
15546                                 /* ret = -ENODEV here? */
15547                         }
15548                         p[i] = 0;
15549                 }
15550 #endif
15551                 /* Now read it back. */
15552                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15553                 if (ret) {
15554                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15555                                 "err = %d\n", __func__, ret);
15556                         break;
15557                 }
15558
15559                 /* Verify it. */
15560                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15561                         if (p[i] == i)
15562                                 continue;
15563
15564                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15565                             DMA_RWCTRL_WRITE_BNDRY_16) {
15566                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15567                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15568                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15569                                 break;
15570                         } else {
15571                                 dev_err(&tp->pdev->dev,
15572                                         "%s: Buffer corrupted on read back! "
15573                                         "(%d != %d)\n", __func__, p[i], i);
15574                                 ret = -ENODEV;
15575                                 goto out;
15576                         }
15577                 }
15578
15579                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15580                         /* Success. */
15581                         ret = 0;
15582                         break;
15583                 }
15584         }
15585         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15586             DMA_RWCTRL_WRITE_BNDRY_16) {
15587                 /* DMA test passed without adjusting the DMA boundary;
15588                  * now look for chipsets that are known to expose the
15589                  * DMA bug without failing the test.
15590                  */
15591                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15592                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15593                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15594                 } else {
15595                         /* Safe to use the calculated DMA boundary. */
15596                         tp->dma_rwctrl = saved_dma_rwctrl;
15597                 }
15598
15599                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15600         }
15601
15602 out:
15603         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15604 out_nofree:
15605         return ret;
15606 }
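
#if 0	/* Condensed shape of one pass of the loop above, illustrative
	 * only (tg3_example_dma_test_once() is hypothetical): fill a
	 * counting pattern, DMA it to NIC SRAM, DMA it back, compare.
	 */
static int tg3_example_dma_test_once(struct tg3 *tp, u32 *buf,
				     dma_addr_t buf_dma)
{
	u32 i;
	int ret;

	for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
		buf[i] = i;				  /* pattern     */
	ret = tg3_do_test_dma(tp, buf, buf_dma,
			      TEST_BUFFER_SIZE, 1);	  /* host -> NIC */
	if (!ret)
		ret = tg3_do_test_dma(tp, buf, buf_dma,
				      TEST_BUFFER_SIZE, 0); /* NIC -> host */
	for (i = 0; !ret && i < TEST_BUFFER_SIZE / sizeof(u32); i++)
		if (buf[i] != i)
			return -ENODEV;			  /* corrupted   */
	return ret;
}
#endif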
15607
15608 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15609 {
15610         if (tg3_flag(tp, 57765_PLUS)) {
15611                 tp->bufmgr_config.mbuf_read_dma_low_water =
15612                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15613                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15614                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15615                 tp->bufmgr_config.mbuf_high_water =
15616                         DEFAULT_MB_HIGH_WATER_57765;
15617
15618                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15619                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15620                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15621                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15622                 tp->bufmgr_config.mbuf_high_water_jumbo =
15623                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15624         } else if (tg3_flag(tp, 5705_PLUS)) {
15625                 tp->bufmgr_config.mbuf_read_dma_low_water =
15626                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15627                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15628                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15629                 tp->bufmgr_config.mbuf_high_water =
15630                         DEFAULT_MB_HIGH_WATER_5705;
15631                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15632                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15633                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15634                         tp->bufmgr_config.mbuf_high_water =
15635                                 DEFAULT_MB_HIGH_WATER_5906;
15636                 }
15637
15638                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15639                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15640                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15641                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15642                 tp->bufmgr_config.mbuf_high_water_jumbo =
15643                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15644         } else {
15645                 tp->bufmgr_config.mbuf_read_dma_low_water =
15646                         DEFAULT_MB_RDMA_LOW_WATER;
15647                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15648                         DEFAULT_MB_MACRX_LOW_WATER;
15649                 tp->bufmgr_config.mbuf_high_water =
15650                         DEFAULT_MB_HIGH_WATER;
15651
15652                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15653                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15654                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15655                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15656                 tp->bufmgr_config.mbuf_high_water_jumbo =
15657                         DEFAULT_MB_HIGH_WATER_JUMBO;
15658         }
15659
15660         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15661         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15662 }
15663
15664 static char * __devinit tg3_phy_string(struct tg3 *tp)
15665 {
15666         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15667         case TG3_PHY_ID_BCM5400:        return "5400";
15668         case TG3_PHY_ID_BCM5401:        return "5401";
15669         case TG3_PHY_ID_BCM5411:        return "5411";
15670         case TG3_PHY_ID_BCM5701:        return "5701";
15671         case TG3_PHY_ID_BCM5703:        return "5703";
15672         case TG3_PHY_ID_BCM5704:        return "5704";
15673         case TG3_PHY_ID_BCM5705:        return "5705";
15674         case TG3_PHY_ID_BCM5750:        return "5750";
15675         case TG3_PHY_ID_BCM5752:        return "5752";
15676         case TG3_PHY_ID_BCM5714:        return "5714";
15677         case TG3_PHY_ID_BCM5780:        return "5780";
15678         case TG3_PHY_ID_BCM5755:        return "5755";
15679         case TG3_PHY_ID_BCM5787:        return "5787";
15680         case TG3_PHY_ID_BCM5784:        return "5784";
15681         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15682         case TG3_PHY_ID_BCM5906:        return "5906";
15683         case TG3_PHY_ID_BCM5761:        return "5761";
15684         case TG3_PHY_ID_BCM5718C:       return "5718C";
15685         case TG3_PHY_ID_BCM5718S:       return "5718S";
15686         case TG3_PHY_ID_BCM57765:       return "57765";
15687         case TG3_PHY_ID_BCM5719C:       return "5719C";
15688         case TG3_PHY_ID_BCM5720C:       return "5720C";
15689         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15690         case 0:                 return "serdes";
15691         default:                return "unknown";
15692         }
15693 }
15694
15695 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15696 {
15697         if (tg3_flag(tp, PCI_EXPRESS)) {
15698                 strcpy(str, "PCI Express");
15699                 return str;
15700         } else if (tg3_flag(tp, PCIX_MODE)) {
15701                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15702
15703                 strcpy(str, "PCIX:");
15704
15705                 if ((clock_ctrl == 7) ||
15706                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15707                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15708                         strcat(str, "133MHz");
15709                 else if (clock_ctrl == 0)
15710                         strcat(str, "33MHz");
15711                 else if (clock_ctrl == 2)
15712                         strcat(str, "50MHz");
15713                 else if (clock_ctrl == 4)
15714                         strcat(str, "66MHz");
15715                 else if (clock_ctrl == 6)
15716                         strcat(str, "100MHz");
15717         } else {
15718                 strcpy(str, "PCI:");
15719                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15720                         strcat(str, "66MHz");
15721                 else
15722                         strcat(str, "33MHz");
15723         }
15724         if (tg3_flag(tp, PCI_32BIT))
15725                 strcat(str, ":32-bit");
15726         else
15727                 strcat(str, ":64-bit");
15728         return str;
15729 }
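
/* Example outputs of tg3_bus_string(), illustrative only: a 5704 on a
 * 133MHz, 64-bit PCI-X bus yields "PCIX:133MHz:64-bit", while a PCIe
 * device returns early with just "PCI Express".  The caller must
 * supply a buffer large enough for the longest string (tg3_init_one()
 * uses char str[40]).
 */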
15730
15731 static void __devinit tg3_init_coal(struct tg3 *tp)
15732 {
15733         struct ethtool_coalesce *ec = &tp->coal;
15734
15735         memset(ec, 0, sizeof(*ec));
15736         ec->cmd = ETHTOOL_GCOALESCE;
15737         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15738         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15739         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15740         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15741         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15742         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15743         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15744         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15745         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15746
15747         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15748                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15749                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15750                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15751                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15752                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15753         }
15754
15755         if (tg3_flag(tp, 5705_PLUS)) {
15756                 ec->rx_coalesce_usecs_irq = 0;
15757                 ec->tx_coalesce_usecs_irq = 0;
15758                 ec->stats_block_coalesce_usecs = 0;
15759         }
15760 }
15761
15762 static int __devinit tg3_init_one(struct pci_dev *pdev,
15763                                   const struct pci_device_id *ent)
15764 {
15765         struct net_device *dev;
15766         struct tg3 *tp;
15767         int i, err, pm_cap;
15768         u32 sndmbx, rcvmbx, intmbx;
15769         char str[40];
15770         u64 dma_mask, persist_dma_mask;
15771         netdev_features_t features = 0;
15772
15773         printk_once(KERN_INFO "%s\n", version);
15774
15775         err = pci_enable_device(pdev);
15776         if (err) {
15777                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15778                 return err;
15779         }
15780
15781         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15782         if (err) {
15783                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15784                 goto err_out_disable_pdev;
15785         }
15786
15787         pci_set_master(pdev);
15788
15789         /* Find power-management capability. */
15790         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15791         if (pm_cap == 0) {
15792                 dev_err(&pdev->dev,
15793                         "Cannot find Power Management capability, aborting\n");
15794                 err = -EIO;
15795                 goto err_out_free_res;
15796         }
15797
15798         err = pci_set_power_state(pdev, PCI_D0);
15799         if (err) {
15800                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15801                 goto err_out_free_res;
15802         }
15803
15804         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15805         if (!dev) {
15806                 err = -ENOMEM;
15807                 goto err_out_power_down;
15808         }
15809
15810         SET_NETDEV_DEV(dev, &pdev->dev);
15811
15812         tp = netdev_priv(dev);
15813         tp->pdev = pdev;
15814         tp->dev = dev;
15815         tp->pm_cap = pm_cap;
15816         tp->rx_mode = TG3_DEF_RX_MODE;
15817         tp->tx_mode = TG3_DEF_TX_MODE;
15818
15819         if (tg3_debug > 0)
15820                 tp->msg_enable = tg3_debug;
15821         else
15822                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15823
15824         /* The word/byte swap controls here govern register access byte
15825          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15826          * setting below.
15827          */
15828         tp->misc_host_ctrl =
15829                 MISC_HOST_CTRL_MASK_PCI_INT |
15830                 MISC_HOST_CTRL_WORD_SWAP |
15831                 MISC_HOST_CTRL_INDIR_ACCESS |
15832                 MISC_HOST_CTRL_PCISTATE_RW;
15833
15834         /* The NONFRM (non-frame) byte/word swap controls take effect
15835          * on descriptor entries, anything which isn't packet data.
15836          *
15837          * The StrongARM chips on the board (one for tx, one for rx)
15838          * are running in big-endian mode.
15839          */
15840         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15841                         GRC_MODE_WSWAP_NONFRM_DATA);
15842 #ifdef __BIG_ENDIAN
15843         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15844 #endif
15845         spin_lock_init(&tp->lock);
15846         spin_lock_init(&tp->indirect_lock);
15847         INIT_WORK(&tp->reset_task, tg3_reset_task);
15848
15849         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15850         if (!tp->regs) {
15851                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15852                 err = -ENOMEM;
15853                 goto err_out_free_dev;
15854         }
15855
15856         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15857             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15858             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15859             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15860             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15861             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15862             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15863             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15864                 tg3_flag_set(tp, ENABLE_APE);
15865                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15866                 if (!tp->aperegs) {
15867                         dev_err(&pdev->dev,
15868                                 "Cannot map APE registers, aborting\n");
15869                         err = -ENOMEM;
15870                         goto err_out_iounmap;
15871                 }
15872         }
15873
15874         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15875         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15876
15877         dev->ethtool_ops = &tg3_ethtool_ops;
15878         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15879         dev->netdev_ops = &tg3_netdev_ops;
15880         dev->irq = pdev->irq;
15881
15882         err = tg3_get_invariants(tp);
15883         if (err) {
15884                 dev_err(&pdev->dev,
15885                         "Problem fetching invariants of chip, aborting\n");
15886                 goto err_out_apeunmap;
15887         }
15888
15889         /* The EPB bridge inside 5714, 5715, and 5780 and any
15890          * device behind the EPB cannot support DMA addresses > 40-bit.
15891          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15892          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15893          * do DMA address check in tg3_start_xmit().
15894          */
15895         if (tg3_flag(tp, IS_5788))
15896                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15897         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15898                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15899 #ifdef CONFIG_HIGHMEM
15900                 dma_mask = DMA_BIT_MASK(64);
15901 #endif
15902         } else
15903                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15904
15905         /* Configure DMA attributes. */
15906         if (dma_mask > DMA_BIT_MASK(32)) {
15907                 err = pci_set_dma_mask(pdev, dma_mask);
15908                 if (!err) {
15909                         features |= NETIF_F_HIGHDMA;
15910                         err = pci_set_consistent_dma_mask(pdev,
15911                                                           persist_dma_mask);
15912                         if (err < 0) {
15913                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15914                                         "DMA for consistent allocations\n");
15915                                 goto err_out_apeunmap;
15916                         }
15917                 }
15918         }
15919         if (err || dma_mask == DMA_BIT_MASK(32)) {
15920                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15921                 if (err) {
15922                         dev_err(&pdev->dev,
15923                                 "No usable DMA configuration, aborting\n");
15924                         goto err_out_apeunmap;
15925                 }
15926         }
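
        /* Illustrative note: DMA_BIT_MASK(40) is 0xff_ffff_ffff, so the
         * EPB workaround above keeps all bus addresses below 1TB; the
         * CONFIG_HIGHMEM case widens only the streaming mask and relies
         * on tg3_start_xmit() to bounce any buffer above 40 bits.
         */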
15927
15928         tg3_init_bufmgr_config(tp);
15929
15930         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15931
15932         /* 5700 B0 chips do not support checksumming correctly due
15933          * to hardware bugs.
15934          */
15935         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15936                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15937
15938                 if (tg3_flag(tp, 5755_PLUS))
15939                         features |= NETIF_F_IPV6_CSUM;
15940         }
15941
15942         /* TSO is on by default on chips that support hardware TSO.
15943          * Firmware TSO on older chips gives lower performance, so it
15944          * is off by default, but can be enabled using ethtool.
15945          */
15946         if ((tg3_flag(tp, HW_TSO_1) ||
15947              tg3_flag(tp, HW_TSO_2) ||
15948              tg3_flag(tp, HW_TSO_3)) &&
15949             (features & NETIF_F_IP_CSUM))
15950                 features |= NETIF_F_TSO;
15951         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15952                 if (features & NETIF_F_IPV6_CSUM)
15953                         features |= NETIF_F_TSO6;
15954                 if (tg3_flag(tp, HW_TSO_3) ||
15955                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15956                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15957                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15958                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15959                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15960                         features |= NETIF_F_TSO_ECN;
15961         }
15962
15963         dev->features |= features;
15964         dev->vlan_features |= features;
15965
15966         /*
15967          * Add loopback capability only for a subset of devices that support
15968          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15969          * loopback for the remaining devices.
15970          */
15971         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15972             !tg3_flag(tp, CPMU_PRESENT))
15973                 /* Add the loopback capability */
15974                 features |= NETIF_F_LOOPBACK;
15975
15976         dev->hw_features |= features;
15977
15978         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15979             !tg3_flag(tp, TSO_CAPABLE) &&
15980             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15981                 tg3_flag_set(tp, MAX_RXPEND_64);
15982                 tp->rx_pending = 63;
15983         }
15984
15985         err = tg3_get_device_address(tp);
15986         if (err) {
15987                 dev_err(&pdev->dev,
15988                         "Could not obtain valid ethernet address, aborting\n");
15989                 goto err_out_apeunmap;
15990         }
15991
15992         /*
15993          * Reset chip in case the UNDI or EFI driver did not shut down
15994          * DMA.  The self test will enable WDMAC and we'll see (spurious)
15995          * pending DMA on the PCI bus at that point.
15996          */
15997         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15998             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15999                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16000                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16001         }
16002
16003         err = tg3_test_dma(tp);
16004         if (err) {
16005                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16006                 goto err_out_apeunmap;
16007         }
16008
16009         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16010         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16011         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16012         for (i = 0; i < tp->irq_max; i++) {
16013                 struct tg3_napi *tnapi = &tp->napi[i];
16014
16015                 tnapi->tp = tp;
16016                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16017
16018                 tnapi->int_mbox = intmbx;
16019                 if (i <= 4)
16020                         intmbx += 0x8;
16021                 else
16022                         intmbx += 0x4;
16023
16024                 tnapi->consmbox = rcvmbx;
16025                 tnapi->prodmbox = sndmbx;
16026
16027                 if (i)
16028                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16029                 else
16030                         tnapi->coal_now = HOSTCC_MODE_NOW;
16031
16032                 if (!tg3_flag(tp, SUPPORT_MSIX))
16033                         break;
16034
16035                 /*
16036                  * If we support MSIX, we'll be using RSS.  If we're using
16037                  * RSS, the first vector only handles link interrupts and the
16038                  * remaining vectors handle rx and tx interrupts.  Reuse the
16039                  * mailbox values for the next iteration.  The values we set up
16040                  * above are still useful for the single-vector mode.
16041                  */
16042                 if (!i)
16043                         continue;
16044
16045                 rcvmbx += 0x8;
16046
16047                 if (sndmbx & 0x4)
16048                         sndmbx -= 0x4;
16049                 else
16050                         sndmbx += 0xc;
16051         }
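
        /* Illustrative note on the loop above: vector 0's receive and
         * producer mailboxes are deliberately reused by vector 1 (the
         * "if (!i) continue"), and thereafter the -0x4/+0xc producer
         * adjustment alternates between the low and high 32-bit halves
         * of consecutive 64-bit mailbox registers, netting +0x8 per
         * pair of vectors (assuming the initial value sits on a low
         * half, i.e. TG3_64BIT_REG_LOW).
         */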
16052
16053         tg3_init_coal(tp);
16054
16055         pci_set_drvdata(pdev, dev);
16056
16057         if (tg3_flag(tp, 5717_PLUS)) {
16058                 /* Resume a low-power mode */
16059                 tg3_frob_aux_power(tp, false);
16060         }
16061
16062         tg3_timer_init(tp);
16063
16064         err = register_netdev(dev);
16065         if (err) {
16066                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16067                 goto err_out_apeunmap;
16068         }
16069
16070         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16071                     tp->board_part_number,
16072                     tp->pci_chip_rev_id,
16073                     tg3_bus_string(tp, str),
16074                     dev->dev_addr);
16075
16076         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16077                 struct phy_device *phydev;
16078                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16079                 netdev_info(dev,
16080                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16081                             phydev->drv->name, dev_name(&phydev->dev));
16082         } else {
16083                 char *ethtype;
16084
16085                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16086                         ethtype = "10/100Base-TX";
16087                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16088                         ethtype = "1000Base-SX";
16089                 else
16090                         ethtype = "10/100/1000Base-T";
16091
16092                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16093                             "(WireSpeed[%d], EEE[%d])\n",
16094                             tg3_phy_string(tp), ethtype,
16095                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16096                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16097         }
16098
16099         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16100                     (dev->features & NETIF_F_RXCSUM) != 0,
16101                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
16102                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16103                     tg3_flag(tp, ENABLE_ASF) != 0,
16104                     tg3_flag(tp, TSO_CAPABLE) != 0);
16105         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16106                     tp->dma_rwctrl,
16107                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16108                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16109
16110         pci_save_state(pdev);
16111
16112         return 0;
16113
16114 err_out_apeunmap:
16115         if (tp->aperegs) {
16116                 iounmap(tp->aperegs);
16117                 tp->aperegs = NULL;
16118         }
16119
16120 err_out_iounmap:
16121         if (tp->regs) {
16122                 iounmap(tp->regs);
16123                 tp->regs = NULL;
16124         }
16125
16126 err_out_free_dev:
16127         free_netdev(dev);
16128
16129 err_out_power_down:
16130         pci_set_power_state(pdev, PCI_D3hot);
16131
16132 err_out_free_res:
16133         pci_release_regions(pdev);
16134
16135 err_out_disable_pdev:
16136         pci_disable_device(pdev);
16137         pci_set_drvdata(pdev, NULL);
16138         return err;
16139 }
16140
16141 static void __devexit tg3_remove_one(struct pci_dev *pdev)
16142 {
16143         struct net_device *dev = pci_get_drvdata(pdev);
16144
16145         if (dev) {
16146                 struct tg3 *tp = netdev_priv(dev);
16147
16148                 release_firmware(tp->fw);
16149
16150                 tg3_reset_task_cancel(tp);
16151
16152                 if (tg3_flag(tp, USE_PHYLIB)) {
16153                         tg3_phy_fini(tp);
16154                         tg3_mdio_fini(tp);
16155                 }
16156
16157                 unregister_netdev(dev);
16158                 if (tp->aperegs) {
16159                         iounmap(tp->aperegs);
16160                         tp->aperegs = NULL;
16161                 }
16162                 if (tp->regs) {
16163                         iounmap(tp->regs);
16164                         tp->regs = NULL;
16165                 }
16166                 free_netdev(dev);
16167                 pci_release_regions(pdev);
16168                 pci_disable_device(pdev);
16169                 pci_set_drvdata(pdev, NULL);
16170         }
16171 }
16172
16173 #ifdef CONFIG_PM_SLEEP
16174 static int tg3_suspend(struct device *device)
16175 {
16176         struct pci_dev *pdev = to_pci_dev(device);
16177         struct net_device *dev = pci_get_drvdata(pdev);
16178         struct tg3 *tp = netdev_priv(dev);
16179         int err;
16180
16181         if (!netif_running(dev))
16182                 return 0;
16183
16184         tg3_reset_task_cancel(tp);
16185         tg3_phy_stop(tp);
16186         tg3_netif_stop(tp);
16187
16188         tg3_timer_stop(tp);
16189
16190         tg3_full_lock(tp, 1);
16191         tg3_disable_ints(tp);
16192         tg3_full_unlock(tp);
16193
16194         netif_device_detach(dev);
16195
16196         tg3_full_lock(tp, 0);
16197         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16198         tg3_flag_clear(tp, INIT_COMPLETE);
16199         tg3_full_unlock(tp);
16200
16201         err = tg3_power_down_prepare(tp);
16202         if (err) {
16203                 int err2;
16204
16205                 tg3_full_lock(tp, 0);
16206
16207                 tg3_flag_set(tp, INIT_COMPLETE);
16208                 err2 = tg3_restart_hw(tp, 1);
16209                 if (err2)
16210                         goto out;
16211
16212                 tg3_timer_start(tp);
16213
16214                 netif_device_attach(dev);
16215                 tg3_netif_start(tp);
16216
16217 out:
16218                 tg3_full_unlock(tp);
16219
16220                 if (!err2)
16221                         tg3_phy_start(tp);
16222         }
16223
16224         return err;
16225 }
16226
16227 static int tg3_resume(struct device *device)
16228 {
16229         struct pci_dev *pdev = to_pci_dev(device);
16230         struct net_device *dev = pci_get_drvdata(pdev);
16231         struct tg3 *tp = netdev_priv(dev);
16232         int err;
16233
16234         if (!netif_running(dev))
16235                 return 0;
16236
16237         netif_device_attach(dev);
16238
16239         tg3_full_lock(tp, 0);
16240
16241         tg3_flag_set(tp, INIT_COMPLETE);
16242         err = tg3_restart_hw(tp, 1);
16243         if (err)
16244                 goto out;
16245
16246         tg3_timer_start(tp);
16247
16248         tg3_netif_start(tp);
16249
16250 out:
16251         tg3_full_unlock(tp);
16252
16253         if (!err)
16254                 tg3_phy_start(tp);
16255
16256         return err;
16257 }
16258
16259 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16260 #define TG3_PM_OPS (&tg3_pm_ops)
16261
16262 #else
16263
16264 #define TG3_PM_OPS NULL
16265
16266 #endif /* CONFIG_PM_SLEEP */
16267
16268 /**
16269  * tg3_io_error_detected - called when PCI error is detected
16270  * @pdev: Pointer to PCI device
16271  * @state: The current pci connection state
16272  *
16273  * This function is called after a PCI bus error affecting
16274  * this device has been detected.
16275  */
16276 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16277                                               pci_channel_state_t state)
16278 {
16279         struct net_device *netdev = pci_get_drvdata(pdev);
16280         struct tg3 *tp = netdev_priv(netdev);
16281         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16282
16283         netdev_info(netdev, "PCI I/O error detected\n");
16284
16285         rtnl_lock();
16286
16287         if (!netif_running(netdev))
16288                 goto done;
16289
16290         tg3_phy_stop(tp);
16291
16292         tg3_netif_stop(tp);
16293
16294         tg3_timer_stop(tp);
16295
16296         /* Want to make sure that the reset task doesn't run */
16297         tg3_reset_task_cancel(tp);
16298
16299         netif_device_detach(netdev);
16300
16301         /* Clean up software state, even if MMIO is blocked */
16302         tg3_full_lock(tp, 0);
16303         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16304         tg3_full_unlock(tp);
16305
16306 done:
16307         if (state == pci_channel_io_perm_failure)
16308                 err = PCI_ERS_RESULT_DISCONNECT;
16309         else
16310                 pci_disable_device(pdev);
16311
16312         rtnl_unlock();
16313
16314         return err;
16315 }
16316
16317 /**
16318  * tg3_io_slot_reset - called after the pci bus has been reset.
16319  * @pdev: Pointer to PCI device
16320  *
16321  * Restart the card from scratch, as if from a cold-boot.
16322  * At this point, the card has experienced a hard reset,
16323  * followed by fixups by BIOS, and has its config space
16324  * set up identically to what it was at cold boot.
16325  */
16326 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16327 {
16328         struct net_device *netdev = pci_get_drvdata(pdev);
16329         struct tg3 *tp = netdev_priv(netdev);
16330         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16331         int err;
16332
16333         rtnl_lock();
16334
16335         if (pci_enable_device(pdev)) {
16336                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16337                 goto done;
16338         }
16339
16340         pci_set_master(pdev);
16341         pci_restore_state(pdev);
16342         pci_save_state(pdev);
16343
16344         if (!netif_running(netdev)) {
16345                 rc = PCI_ERS_RESULT_RECOVERED;
16346                 goto done;
16347         }
16348
16349         err = tg3_power_up(tp);
16350         if (err)
16351                 goto done;
16352
16353         rc = PCI_ERS_RESULT_RECOVERED;
16354
16355 done:
16356         rtnl_unlock();
16357
16358         return rc;
16359 }
16360
16361 /**
16362  * tg3_io_resume - called when traffic can start flowing again.
16363  * @pdev: Pointer to PCI device
16364  *
16365  * This callback is called when the error recovery driver tells
16366  * us that it's OK to resume normal operation.
16367  */
16368 static void tg3_io_resume(struct pci_dev *pdev)
16369 {
16370         struct net_device *netdev = pci_get_drvdata(pdev);
16371         struct tg3 *tp = netdev_priv(netdev);
16372         int err;
16373
16374         rtnl_lock();
16375
16376         if (!netif_running(netdev))
16377                 goto done;
16378
16379         tg3_full_lock(tp, 0);
16380         tg3_flag_set(tp, INIT_COMPLETE);
16381         err = tg3_restart_hw(tp, 1);
16382         tg3_full_unlock(tp);
16383         if (err) {
16384                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16385                 goto done;
16386         }
16387
16388         netif_device_attach(netdev);
16389
16390         tg3_timer_start(tp);
16391
16392         tg3_netif_start(tp);
16393
16394         tg3_phy_start(tp);
16395
16396 done:
16397         rtnl_unlock();
16398 }
16399
16400 static struct pci_error_handlers tg3_err_handler = {
16401         .error_detected = tg3_io_error_detected,
16402         .slot_reset     = tg3_io_slot_reset,
16403         .resume         = tg3_io_resume
16404 };
16405
16406 static struct pci_driver tg3_driver = {
16407         .name           = DRV_MODULE_NAME,
16408         .id_table       = tg3_pci_tbl,
16409         .probe          = tg3_init_one,
16410         .remove         = __devexit_p(tg3_remove_one),
16411         .err_handler    = &tg3_err_handler,
16412         .driver.pm      = TG3_PM_OPS,
16413 };
16414
16415 static int __init tg3_init(void)
16416 {
16417         return pci_register_driver(&tg3_driver);
16418 }
16419
16420 static void __exit tg3_cleanup(void)
16421 {
16422         pci_unregister_driver(&tg3_driver);
16423 }
16424
16425 module_init(tg3_init);
16426 module_exit(tg3_cleanup);