tg3: Obtain PCI function number from device
drivers/net/ethernet/broadcom/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
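
/* Usage sketch (illustrative only, not driver code): callers pass the
 * short flag name and the macros paste on the TG3_FLAG_ prefix, so
 * feature tests and updates read as:
 *
 *      if (tg3_flag(tp, TAGGED_STATUS))
 *              ...;
 *      tg3_flag_set(tp, MDIOBUS_INITED);
 *      tg3_flag_clear(tp, MDIOBUS_INITED);
 */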

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     120
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "August 18, 2011"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
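
/* Example (illustrative): TG3_TX_RING_SIZE is a power of two, so the
 * AND above is equivalent to '% TG3_TX_RING_SIZE' and wraps cleanly:
 * NEXT_TX(0) == 1, and NEXT_TX(511) == (511 + 1) & 511 == 0.
 */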

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
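
/* Note (assumption based on the #if above): on platforms where
 * NET_IP_ALIGN is 0 or unaligned loads are cheap (e.g. x86),
 * TG3_RX_COPY_THRESH() folds to the 256-byte constant at compile time;
 * elsewhere it reads tp->rx_copy_thresh, which probe-time code may
 * adjust for cases like the 5701 PCIX workaround described above.
 */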

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       0
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX               4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
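
/* Usage sketch (illustrative): tw32_f() posts the write and flushes it
 * with a read-back; tw32_wait_f() also enforces a delay for registers
 * that are unsafe to read back immediately, e.g. as the clock switching
 * code below does:
 *
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 */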

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
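                /* fall through */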
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
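                /* fall through */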
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
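
/* Worked example: if the full TG3_FW_EVENT_TIMEOUT_USEC (2500 us) is
 * still outstanding, delay_cnt becomes (2500 >> 3) + 1 = 313, so the
 * loop above polls for at most 313 * 8 us, just over the 2.5 ms
 * firmware event budget.
 */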
1454
1455 /* tp->lock is held. */
1456 static void tg3_ump_link_report(struct tg3 *tp)
1457 {
1458         u32 reg;
1459         u32 val;
1460
1461         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1462                 return;
1463
1464         tg3_wait_for_event_ack(tp);
1465
1466         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1467
1468         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1469
1470         val = 0;
1471         if (!tg3_readphy(tp, MII_BMCR, &reg))
1472                 val = reg << 16;
1473         if (!tg3_readphy(tp, MII_BMSR, &reg))
1474                 val |= (reg & 0xffff);
1475         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1476
1477         val = 0;
1478         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1479                 val = reg << 16;
1480         if (!tg3_readphy(tp, MII_LPA, &reg))
1481                 val |= (reg & 0xffff);
1482         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1483
1484         val = 0;
1485         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1486                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1487                         val = reg << 16;
1488                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1489                         val |= (reg & 0xffff);
1490         }
1491         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1492
1493         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1494                 val = reg << 16;
1495         else
1496                 val = 0;
1497         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1498
1499         tg3_generate_fw_event(tp);
1500 }
1501
1502 /* tp->lock is held. */
1503 static void tg3_stop_fw(struct tg3 *tp)
1504 {
1505         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1506                 /* Wait for RX cpu to ACK the previous event. */
1507                 tg3_wait_for_event_ack(tp);
1508
1509                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1510
1511                 tg3_generate_fw_event(tp);
1512
1513                 /* Wait for RX cpu to ACK this event. */
1514                 tg3_wait_for_event_ack(tp);
1515         }
1516 }
1517
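     /* The tg3_write_sig_*() helpers below publish the driver's state
      * transitions (start, unload, suspend) to the ASF/management
      * firmware through the driver-state mailbox, so firmware keeps an
      * accurate view of the driver across resets.
      */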
1518 /* tp->lock is held. */
1519 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1520 {
1521         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1522                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1523
1524         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1525                 switch (kind) {
1526                 case RESET_KIND_INIT:
1527                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1528                                       DRV_STATE_START);
1529                         break;
1530
1531                 case RESET_KIND_SHUTDOWN:
1532                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1533                                       DRV_STATE_UNLOAD);
1534                         break;
1535
1536                 case RESET_KIND_SUSPEND:
1537                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1538                                       DRV_STATE_SUSPEND);
1539                         break;
1540
1541                 default:
1542                         break;
1543                 }
1544         }
1545
1546         if (kind == RESET_KIND_INIT ||
1547             kind == RESET_KIND_SUSPEND)
1548                 tg3_ape_driver_state_change(tp, kind);
1549 }
1550
1551 /* tp->lock is held. */
1552 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1553 {
1554         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1555                 switch (kind) {
1556                 case RESET_KIND_INIT:
1557                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1558                                       DRV_STATE_START_DONE);
1559                         break;
1560
1561                 case RESET_KIND_SHUTDOWN:
1562                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1563                                       DRV_STATE_UNLOAD_DONE);
1564                         break;
1565
1566                 default:
1567                         break;
1568                 }
1569         }
1570
1571         if (kind == RESET_KIND_SHUTDOWN)
1572                 tg3_ape_driver_state_change(tp, kind);
1573 }
1574
1575 /* tp->lock is held. */
1576 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1577 {
1578         if (tg3_flag(tp, ENABLE_ASF)) {
1579                 switch (kind) {
1580                 case RESET_KIND_INIT:
1581                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1582                                       DRV_STATE_START);
1583                         break;
1584
1585                 case RESET_KIND_SHUTDOWN:
1586                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1587                                       DRV_STATE_UNLOAD);
1588                         break;
1589
1590                 case RESET_KIND_SUSPEND:
1591                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1592                                       DRV_STATE_SUSPEND);
1593                         break;
1594
1595                 default:
1596                         break;
1597                 }
1598         }
1599 }
1600
1601 static int tg3_poll_fw(struct tg3 *tp)
1602 {
1603         int i;
1604         u32 val;
1605
1606         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1607                 /* Wait up to 20ms for init done. */
1608                 for (i = 0; i < 200; i++) {
1609                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1610                                 return 0;
1611                         udelay(100);
1612                 }
1613                 return -ENODEV;
1614         }
1615
1616         /* Wait for firmware initialization to complete. */
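             /* The bootcode signals completion by writing back the
              * bitwise complement of the MAGIC1 value seeded by
              * tg3_write_sig_pre_reset(); 100000 polls at 10 usec give
              * a timeout of roughly one second.
              */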
1617         for (i = 0; i < 100000; i++) {
1618                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1619                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1620                         break;
1621                 udelay(10);
1622         }
1623
1624         /* Chip might not be fitted with firmware.  Some Sun onboard
1625          * parts are configured like that.  So don't signal the timeout
1626          * of the above loop as an error, but do report the lack of
1627          * running firmware once.
1628          */
1629         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1630                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1631
1632                 netdev_info(tp->dev, "No firmware running\n");
1633         }
1634
1635         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1636                 /* The 57765 A0 needs a little more
1637                  * time to do some important work.
1638                  */
1639                 mdelay(10);
1640         }
1641
1642         return 0;
1643 }
1644
1645 static void tg3_link_report(struct tg3 *tp)
1646 {
1647         if (!netif_carrier_ok(tp->dev)) {
1648                 netif_info(tp, link, tp->dev, "Link is down\n");
1649                 tg3_ump_link_report(tp);
1650         } else if (netif_msg_link(tp)) {
1651                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1652                             (tp->link_config.active_speed == SPEED_1000 ?
1653                              1000 :
1654                              (tp->link_config.active_speed == SPEED_100 ?
1655                               100 : 10)),
1656                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1657                              "full" : "half"));
1658
1659                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1660                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1661                             "on" : "off",
1662                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1663                             "on" : "off");
1664
1665                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1666                         netdev_info(tp->dev, "EEE is %s\n",
1667                                     tp->setlpicnt ? "enabled" : "disabled");
1668
1669                 tg3_ump_link_report(tp);
1670         }
1671 }
1672
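     /* Map the driver's FLOW_CTRL_TX/RX bits onto the 802.3 pause
      * advertisement bits.  Note the asymmetry: RX-only flow control
      * must advertise both the symmetric and asymmetric pause bits,
      * while TX-only advertises the asymmetric bit alone.
      */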
1673 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1674 {
1675         u16 miireg;
1676
1677         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1678                 miireg = ADVERTISE_PAUSE_CAP;
1679         else if (flow_ctrl & FLOW_CTRL_TX)
1680                 miireg = ADVERTISE_PAUSE_ASYM;
1681         else if (flow_ctrl & FLOW_CTRL_RX)
1682                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1683         else
1684                 miireg = 0;
1685
1686         return miireg;
1687 }
1688
1689 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1690 {
1691         u16 miireg;
1692
1693         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1694                 miireg = ADVERTISE_1000XPAUSE;
1695         else if (flow_ctrl & FLOW_CTRL_TX)
1696                 miireg = ADVERTISE_1000XPSE_ASYM;
1697         else if (flow_ctrl & FLOW_CTRL_RX)
1698                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1699         else
1700                 miireg = 0;
1701
1702         return miireg;
1703 }
1704
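     /* Resolve the negotiated pause configuration from the local and
      * link-partner 1000BASE-X advertisements, following the pause
      * resolution priority rules of IEEE 802.3 Annex 28B.
      */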
1705 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1706 {
1707         u8 cap = 0;
1708
1709         if (lcladv & ADVERTISE_1000XPAUSE) {
1710                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1711                         if (rmtadv & LPA_1000XPAUSE)
1712                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1713                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1714                                 cap = FLOW_CTRL_RX;
1715                 } else {
1716                         if (rmtadv & LPA_1000XPAUSE)
1717                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1718                 }
1719         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1720                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1721                         cap = FLOW_CTRL_TX;
1722         }
1723
1724         return cap;
1725 }
1726
1727 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1728 {
1729         u8 autoneg;
1730         u8 flowctrl = 0;
1731         u32 old_rx_mode = tp->rx_mode;
1732         u32 old_tx_mode = tp->tx_mode;
1733
1734         if (tg3_flag(tp, USE_PHYLIB))
1735                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1736         else
1737                 autoneg = tp->link_config.autoneg;
1738
1739         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1740                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1741                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1742                 else
1743                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1744         } else
1745                 flowctrl = tp->link_config.flowctrl;
1746
1747         tp->link_config.active_flowctrl = flowctrl;
1748
1749         if (flowctrl & FLOW_CTRL_RX)
1750                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1751         else
1752                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1753
1754         if (old_rx_mode != tp->rx_mode)
1755                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1756
1757         if (flowctrl & FLOW_CTRL_TX)
1758                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1759         else
1760                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1761
1762         if (old_tx_mode != tp->tx_mode)
1763                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1764 }
1765
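     /* phylib link-change callback, registered via phy_connect() in
      * tg3_phy_init() below: re-derives the MAC mode, flow control,
      * and transmit IPG timings whenever the PHY reports a new
      * speed/duplex state.
      */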
1766 static void tg3_adjust_link(struct net_device *dev)
1767 {
1768         u8 oldflowctrl, linkmesg = 0;
1769         u32 mac_mode, lcl_adv, rmt_adv;
1770         struct tg3 *tp = netdev_priv(dev);
1771         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1772
1773         spin_lock_bh(&tp->lock);
1774
1775         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1776                                     MAC_MODE_HALF_DUPLEX);
1777
1778         oldflowctrl = tp->link_config.active_flowctrl;
1779
1780         if (phydev->link) {
1781                 lcl_adv = 0;
1782                 rmt_adv = 0;
1783
1784                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1785                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1786                 else if (phydev->speed == SPEED_1000 ||
1787                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1788                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1789                 else
1790                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1791
1792                 if (phydev->duplex == DUPLEX_HALF)
1793                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1794                 else {
1795                         lcl_adv = tg3_advert_flowctrl_1000T(
1796                                   tp->link_config.flowctrl);
1797
1798                         if (phydev->pause)
1799                                 rmt_adv = LPA_PAUSE_CAP;
1800                         if (phydev->asym_pause)
1801                                 rmt_adv |= LPA_PAUSE_ASYM;
1802                 }
1803
1804                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1805         } else
1806                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1807
1808         if (mac_mode != tp->mac_mode) {
1809                 tp->mac_mode = mac_mode;
1810                 tw32_f(MAC_MODE, tp->mac_mode);
1811                 udelay(40);
1812         }
1813
1814         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1815                 if (phydev->speed == SPEED_10)
1816                         tw32(MAC_MI_STAT,
1817                              MAC_MI_STAT_10MBPS_MODE |
1818                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1819                 else
1820                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1821         }
1822
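             /* Half-duplex gigabit needs the longer slot time (0xff
              * here, approximating the 4096-bit-time slot that carrier
              * extension requires); all other modes use the standard
              * 512-bit-time slot (32).
              */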
1823         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1824                 tw32(MAC_TX_LENGTHS,
1825                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1826                       (6 << TX_LENGTHS_IPG_SHIFT) |
1827                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1828         else
1829                 tw32(MAC_TX_LENGTHS,
1830                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1831                       (6 << TX_LENGTHS_IPG_SHIFT) |
1832                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1833
1834         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1835             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1836             phydev->speed != tp->link_config.active_speed ||
1837             phydev->duplex != tp->link_config.active_duplex ||
1838             oldflowctrl != tp->link_config.active_flowctrl)
1839                 linkmesg = 1;
1840
1841         tp->link_config.active_speed = phydev->speed;
1842         tp->link_config.active_duplex = phydev->duplex;
1843
1844         spin_unlock_bh(&tp->lock);
1845
1846         if (linkmesg)
1847                 tg3_link_report(tp);
1848 }
1849
1850 static int tg3_phy_init(struct tg3 *tp)
1851 {
1852         struct phy_device *phydev;
1853
1854         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1855                 return 0;
1856
1857         /* Bring the PHY back to a known state. */
1858         tg3_bmcr_reset(tp);
1859
1860         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1861
1862         /* Attach the MAC to the PHY. */
1863         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1864                              phydev->dev_flags, phydev->interface);
1865         if (IS_ERR(phydev)) {
1866                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1867                 return PTR_ERR(phydev);
1868         }
1869
1870         /* Mask with MAC supported features. */
1871         switch (phydev->interface) {
1872         case PHY_INTERFACE_MODE_GMII:
1873         case PHY_INTERFACE_MODE_RGMII:
1874                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1875                         phydev->supported &= (PHY_GBIT_FEATURES |
1876                                               SUPPORTED_Pause |
1877                                               SUPPORTED_Asym_Pause);
1878                         break;
1879                 }
1880                 /* fallthru */
1881         case PHY_INTERFACE_MODE_MII:
1882                 phydev->supported &= (PHY_BASIC_FEATURES |
1883                                       SUPPORTED_Pause |
1884                                       SUPPORTED_Asym_Pause);
1885                 break;
1886         default:
1887                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1888                 return -EINVAL;
1889         }
1890
1891         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1892
1893         phydev->advertising = phydev->supported;
1894
1895         return 0;
1896 }
1897
1898 static void tg3_phy_start(struct tg3 *tp)
1899 {
1900         struct phy_device *phydev;
1901
1902         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1903                 return;
1904
1905         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1906
1907         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1908                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1909                 phydev->speed = tp->link_config.orig_speed;
1910                 phydev->duplex = tp->link_config.orig_duplex;
1911                 phydev->autoneg = tp->link_config.orig_autoneg;
1912                 phydev->advertising = tp->link_config.orig_advertising;
1913         }
1914
1915         phy_start(phydev);
1916
1917         phy_start_aneg(phydev);
1918 }
1919
1920 static void tg3_phy_stop(struct tg3 *tp)
1921 {
1922         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1923                 return;
1924
1925         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1926 }
1927
1928 static void tg3_phy_fini(struct tg3 *tp)
1929 {
1930         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1931                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1932                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1933         }
1934 }
1935
1936 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1937 {
1938         int err;
1939         u32 val;
1940
1941         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1942                 return 0;
1943
1944         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1945                 /* Cannot do read-modify-write on 5401 */
1946                 err = tg3_phy_auxctl_write(tp,
1947                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1948                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1949                                            0x4c20);
1950                 goto done;
1951         }
1952
1953         err = tg3_phy_auxctl_read(tp,
1954                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1955         if (err)
1956                 return err;
1957
1958         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1959         err = tg3_phy_auxctl_write(tp,
1960                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1961
1962 done:
1963         return err;
1964 }
1965
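     /* FET-class PHYs expose their shadow registers only while
      * MII_TG3_FET_SHADOW_EN is set in MII_TG3_FET_TEST; the original
      * test register value is restored on the way out so normal MII
      * accesses keep working.
      */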
1966 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1967 {
1968         u32 phytest;
1969
1970         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1971                 u32 phy;
1972
1973                 tg3_writephy(tp, MII_TG3_FET_TEST,
1974                              phytest | MII_TG3_FET_SHADOW_EN);
1975                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1976                         if (enable)
1977                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1978                         else
1979                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1980                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1981                 }
1982                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1983         }
1984 }
1985
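     /* APD (auto power-down) lets the PHY power itself down when no
      * energy is detected on the wire.  Non-FET PHYs program it
      * through the two MISC shadow writes below; the WREN bit latches
      * each value.
      */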
1986 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1987 {
1988         u32 reg;
1989
1990         if (!tg3_flag(tp, 5705_PLUS) ||
1991             (tg3_flag(tp, 5717_PLUS) &&
1992              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1993                 return;
1994
1995         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1996                 tg3_phy_fet_toggle_apd(tp, enable);
1997                 return;
1998         }
1999
2000         reg = MII_TG3_MISC_SHDW_WREN |
2001               MII_TG3_MISC_SHDW_SCR5_SEL |
2002               MII_TG3_MISC_SHDW_SCR5_LPED |
2003               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2004               MII_TG3_MISC_SHDW_SCR5_SDTL |
2005               MII_TG3_MISC_SHDW_SCR5_C125OE;
2006         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2007                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2008
2009         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2010
2011
2012         reg = MII_TG3_MISC_SHDW_WREN |
2013               MII_TG3_MISC_SHDW_APD_SEL |
2014               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2015         if (enable)
2016                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2017
2018         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2019 }
2020
2021 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2022 {
2023         u32 phy;
2024
2025         if (!tg3_flag(tp, 5705_PLUS) ||
2026             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2027                 return;
2028
2029         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2030                 u32 ephy;
2031
2032                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2033                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2034
2035                         tg3_writephy(tp, MII_TG3_FET_TEST,
2036                                      ephy | MII_TG3_FET_SHADOW_EN);
2037                         if (!tg3_readphy(tp, reg, &phy)) {
2038                                 if (enable)
2039                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2040                                 else
2041                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2042                                 tg3_writephy(tp, reg, phy);
2043                         }
2044                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2045                 }
2046         } else {
2047                 int ret;
2048
2049                 ret = tg3_phy_auxctl_read(tp,
2050                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2051                 if (!ret) {
2052                         if (enable)
2053                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2054                         else
2055                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2056                         tg3_phy_auxctl_write(tp,
2057                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2058                 }
2059         }
2060 }
2061
2062 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2063 {
2064         int ret;
2065         u32 val;
2066
2067         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2068                 return;
2069
2070         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2071         if (!ret)
2072                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2073                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2074 }
2075
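     /* Apply PHY tuning values from the chip's one-time-programmable
      * (OTP) storage: each field is extracted from tp->phy_otp and
      * written to the matching DSP coefficient register.
      */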
2076 static void tg3_phy_apply_otp(struct tg3 *tp)
2077 {
2078         u32 otp, phy;
2079
2080         if (!tp->phy_otp)
2081                 return;
2082
2083         otp = tp->phy_otp;
2084
2085         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2086                 return;
2087
2088         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2089         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2090         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2091
2092         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2093               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2094         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2095
2096         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2097         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2098         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2099
2100         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2101         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2102
2103         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2104         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2105
2106         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2107               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2108         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2109
2110         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2111 }
2112
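     /* Energy Efficient Ethernet: tp->setlpicnt is primed only when
      * autoneg resolved a full-duplex 100/1000 link whose partner also
      * advertised EEE; otherwise the LPI enable is torn back down.
      */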
2113 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2114 {
2115         u32 val;
2116
2117         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2118                 return;
2119
2120         tp->setlpicnt = 0;
2121
2122         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2123             current_link_up == 1 &&
2124             tp->link_config.active_duplex == DUPLEX_FULL &&
2125             (tp->link_config.active_speed == SPEED_100 ||
2126              tp->link_config.active_speed == SPEED_1000)) {
2127                 u32 eeectl;
2128
2129                 if (tp->link_config.active_speed == SPEED_1000)
2130                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2131                 else
2132                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2133
2134                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2135
2136                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2137                                   TG3_CL45_D7_EEERES_STAT, &val);
2138
2139                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2140                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2141                         tp->setlpicnt = 2;
2142         }
2143
2144         if (!tp->setlpicnt) {
2145                 if (current_link_up == 1 &&
2146                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2147                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2148                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2149                 }
2150
2151                 val = tr32(TG3_CPMU_EEE_MODE);
2152                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2153         }
2154 }
2155
2156 static void tg3_phy_eee_enable(struct tg3 *tp)
2157 {
2158         u32 val;
2159
2160         if (tp->link_config.active_speed == SPEED_1000 &&
2161             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2162              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2163              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2164             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2165                 val = MII_TG3_DSP_TAP26_ALNOKO |
2166                       MII_TG3_DSP_TAP26_RMRXSTO;
2167                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2168                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2169         }
2170
2171         val = tr32(TG3_CPMU_EEE_MODE);
2172         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2173 }
2174
2175 static int tg3_wait_macro_done(struct tg3 *tp)
2176 {
2177         int limit = 100;
2178
2179         while (limit--) {
2180                 u32 tmp32;
2181
2182                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2183                         if ((tmp32 & 0x1000) == 0)
2184                                 break;
2185                 }
2186         }
2187         if (limit < 0)
2188                 return -EBUSY;
2189
2190         return 0;
2191 }
2192
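     /* Write a known DSP test pattern to each of the four gigabit
      * channels and read it back; a mismatch or macro timeout asks the
      * caller (via *resetp) to reset the PHY and retry.
      */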
2193 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2194 {
2195         static const u32 test_pat[4][6] = {
2196         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2197         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2198         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2199         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2200         };
2201         int chan;
2202
2203         for (chan = 0; chan < 4; chan++) {
2204                 int i;
2205
2206                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2207                              (chan * 0x2000) | 0x0200);
2208                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2209
2210                 for (i = 0; i < 6; i++)
2211                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2212                                      test_pat[chan][i]);
2213
2214                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2215                 if (tg3_wait_macro_done(tp)) {
2216                         *resetp = 1;
2217                         return -EBUSY;
2218                 }
2219
2220                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2221                              (chan * 0x2000) | 0x0200);
2222                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2223                 if (tg3_wait_macro_done(tp)) {
2224                         *resetp = 1;
2225                         return -EBUSY;
2226                 }
2227
2228                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2229                 if (tg3_wait_macro_done(tp)) {
2230                         *resetp = 1;
2231                         return -EBUSY;
2232                 }
2233
2234                 for (i = 0; i < 6; i += 2) {
2235                         u32 low, high;
2236
2237                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2238                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2239                             tg3_wait_macro_done(tp)) {
2240                                 *resetp = 1;
2241                                 return -EBUSY;
2242                         }
2243                         low &= 0x7fff;
2244                         high &= 0x000f;
2245                         if (low != test_pat[chan][i] ||
2246                             high != test_pat[chan][i+1]) {
2247                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2248                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2249                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2250
2251                                 return -EBUSY;
2252                         }
2253                 }
2254         }
2255
2256         return 0;
2257 }
2258
2259 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2260 {
2261         int chan;
2262
2263         for (chan = 0; chan < 4; chan++) {
2264                 int i;
2265
2266                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2267                              (chan * 0x2000) | 0x0200);
2268                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2269                 for (i = 0; i < 6; i++)
2270                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2271                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2272                 if (tg3_wait_macro_done(tp))
2273                         return -EBUSY;
2274         }
2275
2276         return 0;
2277 }
2278
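     /* Reset workaround for 5703/5704/5705 PHYs: force a master-mode
      * gigabit link, verify the DSP channels with test patterns, then
      * restore the original register state.  The whole sequence is
      * retried up to ten times.
      */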
2279 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2280 {
2281         u32 reg32, phy9_orig;
2282         int retries, do_phy_reset, err;
2283
2284         retries = 10;
2285         do_phy_reset = 1;
2286         do {
2287                 if (do_phy_reset) {
2288                         err = tg3_bmcr_reset(tp);
2289                         if (err)
2290                                 return err;
2291                         do_phy_reset = 0;
2292                 }
2293
2294                 /* Disable transmitter and interrupt.  */
2295                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2296                         continue;
2297
2298                 reg32 |= 0x3000;
2299                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2300
2301                 /* Set full-duplex, 1000 Mbps.  */
2302                 tg3_writephy(tp, MII_BMCR,
2303                              BMCR_FULLDPLX | BMCR_SPEED1000);
2304
2305                 /* Set to master mode.  */
2306                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2307                         continue;
2308
2309                 tg3_writephy(tp, MII_CTRL1000,
2310                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2311
2312                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2313                 if (err)
2314                         return err;
2315
2316                 /* Block the PHY control access.  */
2317                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2318
2319                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2320                 if (!err)
2321                         break;
2322         } while (--retries);
2323
2324         err = tg3_phy_reset_chanpat(tp);
2325         if (err)
2326                 return err;
2327
2328         tg3_phydsp_write(tp, 0x8005, 0x0000);
2329
2330         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2331         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2332
2333         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2334
2335         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2336
2337         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2338                 reg32 &= ~0x3000;
2339                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2340         } else if (!err)
2341                 err = -EBUSY;
2342
2343         return err;
2344 }
2345
2346 /* Reset the tigon3 PHY unconditionally and reapply the
2347  * chip-specific workarounds.
2348  */
2349 static int tg3_phy_reset(struct tg3 *tp)
2350 {
2351         u32 val, cpmuctrl;
2352         int err;
2353
2354         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2355                 val = tr32(GRC_MISC_CFG);
2356                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2357                 udelay(40);
2358         }
2359         err  = tg3_readphy(tp, MII_BMSR, &val);
2360         err |= tg3_readphy(tp, MII_BMSR, &val);
2361         if (err != 0)
2362                 return -EBUSY;
2363
2364         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2365                 netif_carrier_off(tp->dev);
2366                 tg3_link_report(tp);
2367         }
2368
2369         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2370             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2371             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2372                 err = tg3_phy_reset_5703_4_5(tp);
2373                 if (err)
2374                         return err;
2375                 goto out;
2376         }
2377
2378         cpmuctrl = 0;
2379         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2380             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2381                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2382                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2383                         tw32(TG3_CPMU_CTRL,
2384                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2385         }
2386
2387         err = tg3_bmcr_reset(tp);
2388         if (err)
2389                 return err;
2390
2391         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2392                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2393                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2394
2395                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2396         }
2397
2398         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2399             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2400                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2401                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2402                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2403                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2404                         udelay(40);
2405                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2406                 }
2407         }
2408
2409         if (tg3_flag(tp, 5717_PLUS) &&
2410             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2411                 return 0;
2412
2413         tg3_phy_apply_otp(tp);
2414
2415         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2416                 tg3_phy_toggle_apd(tp, true);
2417         else
2418                 tg3_phy_toggle_apd(tp, false);
2419
2420 out:
2421         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2422             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2423                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2424                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2425                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2426         }
2427
2428         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2429                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2430                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2431         }
2432
2433         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2434                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2435                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2436                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2437                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2438                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439                 }
2440         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2441                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2442                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2443                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2444                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2445                                 tg3_writephy(tp, MII_TG3_TEST1,
2446                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2447                         } else
2448                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2449
2450                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2451                 }
2452         }
2453
2454         /* Set the extended packet length bit (bit 14) on all
2455          * chips that support jumbo frames. */
2456         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2457                 /* Cannot do read-modify-write on 5401 */
2458                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2459         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2460                 /* Set bit 14 with read-modify-write to preserve other bits */
2461                 err = tg3_phy_auxctl_read(tp,
2462                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2463                 if (!err)
2464                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2465                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2466         }
2467
2468         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2469          * jumbo frames transmission.
2470          */
2471         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2472                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2473                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2474                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2475         }
2476
2477         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2478                 /* adjust output voltage */
2479                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2480         }
2481
2482         tg3_phy_toggle_automdix(tp, 1);
2483         tg3_phy_set_wirespeed(tp);
2484         return 0;
2485 }
2486
2487 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2488 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2489 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2490                                           TG3_GPIO_MSG_NEED_VAUX)
2491 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2492         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2493          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2494          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2495          (TG3_GPIO_MSG_DRVR_PRES << 12))
2496
2497 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2498         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2499          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2500          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2501          (TG3_GPIO_MSG_NEED_VAUX << 12))
2502
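     /* Each PCI function owns a four-bit slice of the GPIO message
      * word (hence the << 0/4/8/12 fan-out above); only two bits of
      * each slice -- driver-present and needs-Vaux -- are used here.
      */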
2503 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2504 {
2505         u32 status, shift;
2506
2507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2508             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2509                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2510         else
2511                 status = tr32(TG3_CPMU_DRV_STATUS);
2512
2513         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2514         status &= ~(TG3_GPIO_MSG_MASK << shift);
2515         status |= (newstat << shift);
2516
2517         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2518             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2519                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2520         else
2521                 tw32(TG3_CPMU_DRV_STATUS, status);
2522
2523         return status >> TG3_APE_GPIO_MSG_SHIFT;
2524 }
2525
2526 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2527 {
2528         if (!tg3_flag(tp, IS_NIC))
2529                 return 0;
2530
2531         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2532             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2533             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2534                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2535                         return -EIO;
2536
2537                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2538
2539                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2540                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2541
2542                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2543         } else {
2544                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2545                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2546         }
2547
2548         return 0;
2549 }
2550
2551 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2552 {
2553         u32 grc_local_ctrl;
2554
2555         if (!tg3_flag(tp, IS_NIC) ||
2556             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2557             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2558                 return;
2559
2560         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2561
2562         tw32_wait_f(GRC_LOCAL_CTRL,
2563                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2564                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2565
2566         tw32_wait_f(GRC_LOCAL_CTRL,
2567                     grc_local_ctrl,
2568                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2569
2570         tw32_wait_f(GRC_LOCAL_CTRL,
2571                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2572                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2573 }
2574
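     /* Drive the GPIO sequence that hands the NIC over to auxiliary
      * (Vaux) power.  The GPIO assignments vary per board family,
      * hence the three chip-specific arms below.
      */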
2575 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2576 {
2577         if (!tg3_flag(tp, IS_NIC))
2578                 return;
2579
2580         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2581             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2582                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2583                             (GRC_LCLCTRL_GPIO_OE0 |
2584                              GRC_LCLCTRL_GPIO_OE1 |
2585                              GRC_LCLCTRL_GPIO_OE2 |
2586                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2587                              GRC_LCLCTRL_GPIO_OUTPUT1),
2588                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2589         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2590                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2591                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2592                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2593                                      GRC_LCLCTRL_GPIO_OE1 |
2594                                      GRC_LCLCTRL_GPIO_OE2 |
2595                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2596                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2597                                      tp->grc_local_ctrl;
2598                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2599                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2600
2601                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2602                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2603                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2604
2605                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2606                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2607                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2608         } else {
2609                 u32 no_gpio2;
2610                 u32 grc_local_ctrl = 0;
2611
2612                 /* Workaround to prevent overdrawing current. */
2613                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2614                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2615                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2616                                     grc_local_ctrl,
2617                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2618                 }
2619
2620                 /* On 5753 and variants, GPIO2 cannot be used. */
2621                 no_gpio2 = tp->nic_sram_data_cfg &
2622                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2623
2624                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2625                                   GRC_LCLCTRL_GPIO_OE1 |
2626                                   GRC_LCLCTRL_GPIO_OE2 |
2627                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2628                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2629                 if (no_gpio2) {
2630                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2631                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2632                 }
2633                 tw32_wait_f(GRC_LOCAL_CTRL,
2634                             tp->grc_local_ctrl | grc_local_ctrl,
2635                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2636
2637                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2638
2639                 tw32_wait_f(GRC_LOCAL_CTRL,
2640                             tp->grc_local_ctrl | grc_local_ctrl,
2641                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2642
2643                 if (!no_gpio2) {
2644                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2645                         tw32_wait_f(GRC_LOCAL_CTRL,
2646                                     tp->grc_local_ctrl | grc_local_ctrl,
2647                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2648                 }
2649         }
2650 }
2651
2652 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2653 {
2654         u32 msg = 0;
2655
2656         /* Serialize power state transitions */
2657         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2658                 return;
2659
2660         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2661                 msg = TG3_GPIO_MSG_NEED_VAUX;
2662
2663         msg = tg3_set_function_status(tp, msg);
2664
2665         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2666                 goto done;
2667
2668         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2669                 tg3_pwrsrc_switch_to_vaux(tp);
2670         else
2671                 tg3_pwrsrc_die_with_vmain(tp);
2672
2673 done:
2674         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2675 }
2676
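     /* Decide whether this function (or its peer) still needs
      * auxiliary power -- for WoL or ASF management -- and switch the
      * power source to Vaux if so; otherwise it is safe to stay on
      * Vmain.
      */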
2677 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2678 {
2679         bool need_vaux = false;
2680
2681         /* The GPIOs do something completely different on 57765. */
2682         if (!tg3_flag(tp, IS_NIC) ||
2683             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2684                 return;
2685
2686         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2687             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2688             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2689                 tg3_frob_aux_power_5717(tp, include_wol ?
2690                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2691                 return;
2692         }
2693
2694         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2695                 struct net_device *dev_peer;
2696
2697                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2698
2699                 /* remove_one() may have been run on the peer. */
2700                 if (dev_peer) {
2701                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2702
2703                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2704                                 return;
2705
2706                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2707                             tg3_flag(tp_peer, ENABLE_ASF))
2708                                 need_vaux = true;
2709                 }
2710         }
2711
2712         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2713             tg3_flag(tp, ENABLE_ASF))
2714                 need_vaux = true;
2715
2716         if (need_vaux)
2717                 tg3_pwrsrc_switch_to_vaux(tp);
2718         else
2719                 tg3_pwrsrc_die_with_vmain(tp);
2720 }
2721
2722 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2723 {
2724         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2725                 return 1;
2726         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2727                 if (speed != SPEED_10)
2728                         return 1;
2729         } else if (speed == SPEED_10)
2730                 return 1;
2731
2732         return 0;
2733 }
2734
2735 static int tg3_setup_phy(struct tg3 *, int);
2736 static int tg3_halt_cpu(struct tg3 *, u32);
2737
2738 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2739 {
2740         u32 val;
2741
2742         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2743                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2744                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2745                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2746
2747                         sg_dig_ctrl |=
2748                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2749                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2750                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2751                 }
2752                 return;
2753         }
2754
2755         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2756                 tg3_bmcr_reset(tp);
2757                 val = tr32(GRC_MISC_CFG);
2758                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2759                 udelay(40);
2760                 return;
2761         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2762                 u32 phytest;
2763                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2764                         u32 phy;
2765
2766                         tg3_writephy(tp, MII_ADVERTISE, 0);
2767                         tg3_writephy(tp, MII_BMCR,
2768                                      BMCR_ANENABLE | BMCR_ANRESTART);
2769
2770                         tg3_writephy(tp, MII_TG3_FET_TEST,
2771                                      phytest | MII_TG3_FET_SHADOW_EN);
2772                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2773                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2774                                 tg3_writephy(tp,
2775                                              MII_TG3_FET_SHDW_AUXMODE4,
2776                                              phy);
2777                         }
2778                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2779                 }
2780                 return;
2781         } else if (do_low_power) {
2782                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2783                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2784
2785                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2786                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2787                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2788                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2789         }
2790
2791         /* The PHY should not be powered down on some chips because
2792          * of bugs.
2793          */
2794         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2795             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2796             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2797              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2798                 return;
2799
2800         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2801             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2802                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2803                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2804                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2805                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2806         }
2807
2808         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2809 }
2810
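     /* NVRAM access is arbitrated with the other on-chip agents
      * through the SWARB register: request with SWARB_REQ_SET1, poll
      * for SWARB_GNT1, and release with SWARB_REQ_CLR1 on the final
      * unlock.  nvram_lock_cnt makes the lock recursive for nested
      * callers.
      */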
2811 /* tp->lock is held. */
2812 static int tg3_nvram_lock(struct tg3 *tp)
2813 {
2814         if (tg3_flag(tp, NVRAM)) {
2815                 int i;
2816
2817                 if (tp->nvram_lock_cnt == 0) {
2818                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2819                         for (i = 0; i < 8000; i++) {
2820                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2821                                         break;
2822                                 udelay(20);
2823                         }
2824                         if (i == 8000) {
2825                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2826                                 return -ENODEV;
2827                         }
2828                 }
2829                 tp->nvram_lock_cnt++;
2830         }
2831         return 0;
2832 }
2833
2834 /* tp->lock is held. */
2835 static void tg3_nvram_unlock(struct tg3 *tp)
2836 {
2837         if (tg3_flag(tp, NVRAM)) {
2838                 if (tp->nvram_lock_cnt > 0)
2839                         tp->nvram_lock_cnt--;
2840                 if (tp->nvram_lock_cnt == 0)
2841                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2842         }
2843 }
2844
2845 /* tp->lock is held. */
2846 static void tg3_enable_nvram_access(struct tg3 *tp)
2847 {
2848         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2849                 u32 nvaccess = tr32(NVRAM_ACCESS);
2850
2851                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2852         }
2853 }
2854
2855 /* tp->lock is held. */
2856 static void tg3_disable_nvram_access(struct tg3 *tp)
2857 {
2858         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2859                 u32 nvaccess = tr32(NVRAM_ACCESS);
2860
2861                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2862         }
2863 }
2864
2865 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2866                                         u32 offset, u32 *val)
2867 {
2868         u32 tmp;
2869         int i;
2870
2871         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2872                 return -EINVAL;
2873
2874         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2875                                         EEPROM_ADDR_DEVID_MASK |
2876                                         EEPROM_ADDR_READ);
2877         tw32(GRC_EEPROM_ADDR,
2878              tmp |
2879              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2880              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2881               EEPROM_ADDR_ADDR_MASK) |
2882              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2883
2884         for (i = 0; i < 1000; i++) {
2885                 tmp = tr32(GRC_EEPROM_ADDR);
2886
2887                 if (tmp & EEPROM_ADDR_COMPLETE)
2888                         break;
2889                 msleep(1);
2890         }
2891         if (!(tmp & EEPROM_ADDR_COMPLETE))
2892                 return -EBUSY;
2893
2894         tmp = tr32(GRC_EEPROM_DATA);
2895
2896         /* The data will always be in the opposite of the native
2897          * endian format.  Perform a blind byteswap to compensate
2898          * (e.g. 0xaabbccdd becomes 0xddccbbaa).
2899          */
2900         *val = swab32(tmp);
2901
2902         return 0;
2903 }
2904
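/* Each poll below busy-waits 10 us, bounding a command at ~100 ms. */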
2905 #define NVRAM_CMD_TIMEOUT 10000
2906
2907 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2908 {
2909         int i;
2910
2911         tw32(NVRAM_CMD, nvram_cmd);
2912         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2913                 udelay(10);
2914                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2915                         udelay(10);
2916                         break;
2917                 }
2918         }
2919
2920         if (i == NVRAM_CMD_TIMEOUT)
2921                 return -EBUSY;
2922
2923         return 0;
2924 }
2925
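/* Map a linear NVRAM offset to the page:offset form used by Atmel
 * AT45DB0x1B parts, which place the page number above the in-page
 * byte offset rather than addressing bytes linearly.
 */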
2926 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2927 {
2928         if (tg3_flag(tp, NVRAM) &&
2929             tg3_flag(tp, NVRAM_BUFFERED) &&
2930             tg3_flag(tp, FLASH) &&
2931             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2932             (tp->nvram_jedecnum == JEDEC_ATMEL))
2933
2934                 addr = ((addr / tp->nvram_pagesize) <<
2935                         ATMEL_AT45DB0X1B_PAGE_POS) +
2936                        (addr % tp->nvram_pagesize);
2937
2938         return addr;
2939 }
2940
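/* Inverse of tg3_nvram_phys_addr(): map an Atmel page:offset address
 * back to a linear NVRAM offset.
 */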
2941 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2942 {
2943         if (tg3_flag(tp, NVRAM) &&
2944             tg3_flag(tp, NVRAM_BUFFERED) &&
2945             tg3_flag(tp, FLASH) &&
2946             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2947             (tp->nvram_jedecnum == JEDEC_ATMEL))
2948
2949                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2950                         tp->nvram_pagesize) +
2951                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2952
2953         return addr;
2954 }
2955
2956 /* NOTE: Data read in from NVRAM is byteswapped according to
2957  * the byteswapping settings for all other register accesses.
2958  * tg3 devices are BE devices, so on a BE machine, the data
2959  * returned will be exactly as it is seen in NVRAM.  On a LE
2960  * machine, the 32-bit value will be byteswapped.
2961  */
2962 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2963 {
2964         int ret;
2965
2966         if (!tg3_flag(tp, NVRAM))
2967                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2968
2969         offset = tg3_nvram_phys_addr(tp, offset);
2970
2971         if (offset > NVRAM_ADDR_MSK)
2972                 return -EINVAL;
2973
2974         ret = tg3_nvram_lock(tp);
2975         if (ret)
2976                 return ret;
2977
2978         tg3_enable_nvram_access(tp);
2979
2980         tw32(NVRAM_ADDR, offset);
2981         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2982                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2983
2984         if (ret == 0)
2985                 *val = tr32(NVRAM_RDDATA);
2986
2987         tg3_disable_nvram_access(tp);
2988
2989         tg3_nvram_unlock(tp);
2990
2991         return ret;
2992 }
2993
2994 /* Ensures NVRAM data is in bytestream format. */
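/* A minimal usage sketch, assuming a caller-supplied bytestream
 * buffer (buf below is hypothetical):
 *
 *        __be32 v;
 *
 *        if (!tg3_nvram_read_be32(tp, 0x0, &v))
 *                memcpy(buf, &v, sizeof(v));
 */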
2995 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2996 {
2997         u32 v;
2998         int res = tg3_nvram_read(tp, offset, &v);
2999         if (!res)
3000                 *val = cpu_to_be32(v);
3001         return res;
3002 }
3003
3004 #define RX_CPU_SCRATCH_BASE     0x30000
3005 #define RX_CPU_SCRATCH_SIZE     0x04000
3006 #define TX_CPU_SCRATCH_BASE     0x34000
3007 #define TX_CPU_SCRATCH_SIZE     0x04000
3008
3009 /* tp->lock is held. */
3010 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3011 {
3012         int i;
3013
3014         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3015
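        /* The 5906 exposes a single VCPU, which is halted through
         * GRC_VCPU_EXT_CTRL rather than a per-CPU MODE register.
         */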
3016         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3017                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3018
3019                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3020                 return 0;
3021         }
3022         if (offset == RX_CPU_BASE) {
3023                 for (i = 0; i < 10000; i++) {
3024                         tw32(offset + CPU_STATE, 0xffffffff);
3025                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3026                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3027                                 break;
3028                 }
3029
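                /* Issue one final forced halt with a settle delay; the
                 * retry loop above suggests the RX CPU does not always
                 * latch the halt bit on the first write.
                 */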
3030                 tw32(offset + CPU_STATE, 0xffffffff);
3031                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3032                 udelay(10);
3033         } else {
3034                 for (i = 0; i < 10000; i++) {
3035                         tw32(offset + CPU_STATE, 0xffffffff);
3036                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3037                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3038                                 break;
3039                 }
3040         }
3041
3042         if (i >= 10000) {
3043                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3044                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3045                 return -ENODEV;
3046         }
3047
3048         /* Clear firmware's nvram arbitration. */
3049         if (tg3_flag(tp, NVRAM))
3050                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3051         return 0;
3052 }
3053
3054 struct fw_info {
3055         unsigned int fw_base;
3056         unsigned int fw_len;
3057         const __be32 *fw_data;
3058 };
3059
3060 /* tp->lock is held. */
3061 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3062                                  u32 cpu_scratch_base, int cpu_scratch_size,
3063                                  struct fw_info *info)
3064 {
3065         int err, lock_err, i;
3066         void (*write_op)(struct tg3 *, u32, u32);
3067
3068         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3069                 netdev_err(tp->dev,
3070                            "%s: Trying to load TX cpu firmware on a 5705-class chip\n",
3071                            __func__);
3072                 return -EINVAL;
3073         }
3074
3075         if (tg3_flag(tp, 5705_PLUS))
3076                 write_op = tg3_write_mem;
3077         else
3078                 write_op = tg3_write_indirect_reg32;
3079
3080         /* It is possible that bootcode is still loading at this point.
3081          * Acquire the nvram lock before halting the cpu.
3082          */
3083         lock_err = tg3_nvram_lock(tp);
3084         err = tg3_halt_cpu(tp, cpu_base);
3085         if (!lock_err)
3086                 tg3_nvram_unlock(tp);
3087         if (err)
3088                 goto out;
3089
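        /* Zero the whole scratch area, hold the CPU in halt, then copy
         * the big-endian firmware image in 32-bit words at its
         * link-time offset within the scratch window.
         */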
3090         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3091                 write_op(tp, cpu_scratch_base + i, 0);
3092         tw32(cpu_base + CPU_STATE, 0xffffffff);
3093         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3094         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3095                 write_op(tp, (cpu_scratch_base +
3096                               (info->fw_base & 0xffff) +
3097                               (i * sizeof(u32))),
3098                               be32_to_cpu(info->fw_data[i]));
3099
3100         err = 0;
3101
3102 out:
3103         return err;
3104 }
3105
3106 /* tp->lock is held. */
3107 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3108 {
3109         struct fw_info info;
3110         const __be32 *fw_data;
3111         int err, i;
3112
3113         fw_data = (void *)tp->fw->data;
3114
3115         /* Firmware blob starts with version numbers, followed by
3116          * start address and length.  We are setting complete length:
3117          * length = end_address_of_bss - start_address_of_text.
3118          * The remainder is the blob to be loaded contiguously
3119          * from the start address. */
3120
3121         info.fw_base = be32_to_cpu(fw_data[1]);
3122         info.fw_len = tp->fw->size - 12;
3123         info.fw_data = &fw_data[3];
3124
3125         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3126                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3127                                     &info);
3128         if (err)
3129                 return err;
3130
3131         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3132                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3133                                     &info);
3134         if (err)
3135                 return err;
3136
3137         /* Now startup only the RX cpu. */
3138         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3139         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3140
3141         for (i = 0; i < 5; i++) {
3142                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3143                         break;
3144                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3145                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3146                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3147                 udelay(1000);
3148         }
3149         if (i >= 5) {
3150                 netdev_err(tp->dev, "%s: failed to set RX CPU PC, is %08x, "
3151                            "should be %08x\n", __func__,
3152                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3153                 return -ENODEV;
3154         }
3155         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3156         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3157
3158         return 0;
3159 }
3160
3161 /* tp->lock is held. */
3162 static int tg3_load_tso_firmware(struct tg3 *tp)
3163 {
3164         struct fw_info info;
3165         const __be32 *fw_data;
3166         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3167         int err, i;
3168
3169         if (tg3_flag(tp, HW_TSO_1) ||
3170             tg3_flag(tp, HW_TSO_2) ||
3171             tg3_flag(tp, HW_TSO_3))
3172                 return 0;
3173
3174         fw_data = (void *)tp->fw->data;
3175
3176         /* Firmware blob starts with version numbers, followed by
3177          * start address and length.  We are setting complete length:
3178          * length = end_address_of_bss - start_address_of_text.
3179          * The remainder is the blob to be loaded contiguously
3180          * from the start address. */
3181
3182         info.fw_base = be32_to_cpu(fw_data[1]);
3183         cpu_scratch_size = tp->fw_len;
3184         info.fw_len = tp->fw->size - 12;
3185         info.fw_data = &fw_data[3];
3186
3187         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3188                 cpu_base = RX_CPU_BASE;
3189                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3190         } else {
3191                 cpu_base = TX_CPU_BASE;
3192                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3193                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3194         }
3195
3196         err = tg3_load_firmware_cpu(tp, cpu_base,
3197                                     cpu_scratch_base, cpu_scratch_size,
3198                                     &info);
3199         if (err)
3200                 return err;
3201
3202         /* Now startup the cpu. */
3203         tw32(cpu_base + CPU_STATE, 0xffffffff);
3204         tw32_f(cpu_base + CPU_PC, info.fw_base);
3205
3206         for (i = 0; i < 5; i++) {
3207                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3208                         break;
3209                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3210                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3211                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3212                 udelay(1000);
3213         }
3214         if (i >= 5) {
3215                 netdev_err(tp->dev,
3216                            "%s: failed to set CPU PC, is %08x, should be %08x\n",
3217                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3218                 return -ENODEV;
3219         }
3220         tw32(cpu_base + CPU_STATE, 0xffffffff);
3221         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3222         return 0;
3223 }
3224
3225
3226 /* tp->lock is held. */
3227 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3228 {
3229         u32 addr_high, addr_low;
3230         int i;
3231
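        /* Each hardware MAC slot splits the station address across a
         * HIGH register (top two bytes) and a LOW register (bottom
         * four); mirror it into all four slots, optionally skipping
         * slot 1.
         */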
3232         addr_high = ((tp->dev->dev_addr[0] << 8) |
3233                      tp->dev->dev_addr[1]);
3234         addr_low = ((tp->dev->dev_addr[2] << 24) |
3235                     (tp->dev->dev_addr[3] << 16) |
3236                     (tp->dev->dev_addr[4] <<  8) |
3237                     (tp->dev->dev_addr[5] <<  0));
3238         for (i = 0; i < 4; i++) {
3239                 if (i == 1 && skip_mac_1)
3240                         continue;
3241                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3242                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3243         }
3244
3245         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3246             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3247                 for (i = 0; i < 12; i++) {
3248                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3249                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3250                 }
3251         }
3252
3253         addr_high = (tp->dev->dev_addr[0] +
3254                      tp->dev->dev_addr[1] +
3255                      tp->dev->dev_addr[2] +
3256                      tp->dev->dev_addr[3] +
3257                      tp->dev->dev_addr[4] +
3258                      tp->dev->dev_addr[5]) &
3259                 TX_BACKOFF_SEED_MASK;
3260         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3261 }
3262
3263 static void tg3_enable_register_access(struct tg3 *tp)
3264 {
3265         /*
3266          * Make sure register accesses (indirect or otherwise) will function
3267          * correctly.
3268          */
3269         pci_write_config_dword(tp->pdev,
3270                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3271 }
3272
3273 static int tg3_power_up(struct tg3 *tp)
3274 {
3275         int err;
3276
3277         tg3_enable_register_access(tp);
3278
3279         err = pci_set_power_state(tp->pdev, PCI_D0);
3280         if (!err) {
3281                 /* Switch out of Vaux if it is a NIC */
3282                 tg3_pwrsrc_switch_to_vmain(tp);
3283         } else {
3284                 netdev_err(tp->dev, "Transition to D0 failed\n");
3285         }
3286
3287         return err;
3288 }
3289
3290 static int tg3_power_down_prepare(struct tg3 *tp)
3291 {
3292         u32 misc_host_ctrl;
3293         bool device_should_wake, do_low_power;
3294
3295         tg3_enable_register_access(tp);
3296
3297         /* Restore the CLKREQ setting. */
3298         if (tg3_flag(tp, CLKREQ_BUG)) {
3299                 u16 lnkctl;
3300
3301                 pci_read_config_word(tp->pdev,
3302                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3303                                      &lnkctl);
3304                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3305                 pci_write_config_word(tp->pdev,
3306                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3307                                       lnkctl);
3308         }
3309
3310         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3311         tw32(TG3PCI_MISC_HOST_CTRL,
3312              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3313
3314         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3315                              tg3_flag(tp, WOL_ENABLE);
3316
3317         if (tg3_flag(tp, USE_PHYLIB)) {
3318                 do_low_power = false;
3319                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3320                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3321                         struct phy_device *phydev;
3322                         u32 phyid, advertising;
3323
3324                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3325
3326                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3327
3328                         tp->link_config.orig_speed = phydev->speed;
3329                         tp->link_config.orig_duplex = phydev->duplex;
3330                         tp->link_config.orig_autoneg = phydev->autoneg;
3331                         tp->link_config.orig_advertising = phydev->advertising;
3332
3333                         advertising = ADVERTISED_TP |
3334                                       ADVERTISED_Pause |
3335                                       ADVERTISED_Autoneg |
3336                                       ADVERTISED_10baseT_Half;
3337
3338                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3339                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3340                                         advertising |=
3341                                                 ADVERTISED_100baseT_Half |
3342                                                 ADVERTISED_100baseT_Full |
3343                                                 ADVERTISED_10baseT_Full;
3344                                 else
3345                                         advertising |= ADVERTISED_10baseT_Full;
3346                         }
3347
3348                         phydev->advertising = advertising;
3349
3350                         phy_start_aneg(phydev);
3351
3352                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3353                         if (phyid != PHY_ID_BCMAC131) {
3354                                 phyid &= PHY_BCM_OUI_MASK;
3355                                 if (phyid == PHY_BCM_OUI_1 ||
3356                                     phyid == PHY_BCM_OUI_2 ||
3357                                     phyid == PHY_BCM_OUI_3)
3358                                         do_low_power = true;
3359                         }
3360                 }
3361         } else {
3362                 do_low_power = true;
3363
3364                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3365                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3366                         tp->link_config.orig_speed = tp->link_config.speed;
3367                         tp->link_config.orig_duplex = tp->link_config.duplex;
3368                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
3369                 }
3370
3371                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3372                         tp->link_config.speed = SPEED_10;
3373                         tp->link_config.duplex = DUPLEX_HALF;
3374                         tp->link_config.autoneg = AUTONEG_ENABLE;
3375                         tg3_setup_phy(tp, 0);
3376                 }
3377         }
3378
3379         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3380                 u32 val;
3381
3382                 val = tr32(GRC_VCPU_EXT_CTRL);
3383                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3384         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3385                 int i;
3386                 u32 val;
3387
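                /* Give the bootcode up to ~200 ms to post its magic
                 * completion value in the ASF status mailbox; the
                 * result is not checked (best effort).
                 */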
3388                 for (i = 0; i < 200; i++) {
3389                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3390                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3391                                 break;
3392                         msleep(1);
3393                 }
3394         }
3395         if (tg3_flag(tp, WOL_CAP))
3396                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3397                                                      WOL_DRV_STATE_SHUTDOWN |
3398                                                      WOL_DRV_WOL |
3399                                                      WOL_SET_MAGIC_PKT);
3400
3401         if (device_should_wake) {
3402                 u32 mac_mode;
3403
3404                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3405                         if (do_low_power &&
3406                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3407                                 tg3_phy_auxctl_write(tp,
3408                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3409                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3410                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3411                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3412                                 udelay(40);
3413                         }
3414
3415                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3416                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3417                         else
3418                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3419
3420                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3421                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3422                             ASIC_REV_5700) {
3423                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3424                                              SPEED_100 : SPEED_10;
3425                                 if (tg3_5700_link_polarity(tp, speed))
3426                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3427                                 else
3428                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3429                         }
3430                 } else {
3431                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3432                 }
3433
3434                 if (!tg3_flag(tp, 5750_PLUS))
3435                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3436
3437                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3438                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3439                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3440                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3441
3442                 if (tg3_flag(tp, ENABLE_APE))
3443                         mac_mode |= MAC_MODE_APE_TX_EN |
3444                                     MAC_MODE_APE_RX_EN |
3445                                     MAC_MODE_TDE_ENABLE;
3446
3447                 tw32_f(MAC_MODE, mac_mode);
3448                 udelay(100);
3449
3450                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3451                 udelay(10);
3452         }
3453
3454         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3455             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3456              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3457                 u32 base_val;
3458
3459                 base_val = tp->pci_clock_ctrl;
3460                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3461                              CLOCK_CTRL_TXCLK_DISABLE);
3462
3463                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3464                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3465         } else if (tg3_flag(tp, 5780_CLASS) ||
3466                    tg3_flag(tp, CPMU_PRESENT) ||
3467                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3468                 /* do nothing */
3469         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3470                 u32 newbits1, newbits2;
3471
3472                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3473                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3474                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3475                                     CLOCK_CTRL_TXCLK_DISABLE |
3476                                     CLOCK_CTRL_ALTCLK);
3477                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3478                 } else if (tg3_flag(tp, 5705_PLUS)) {
3479                         newbits1 = CLOCK_CTRL_625_CORE;
3480                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3481                 } else {
3482                         newbits1 = CLOCK_CTRL_ALTCLK;
3483                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3484                 }
3485
3486                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3487                             40);
3488
3489                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3490                             40);
3491
3492                 if (!tg3_flag(tp, 5705_PLUS)) {
3493                         u32 newbits3;
3494
3495                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3496                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3497                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3498                                             CLOCK_CTRL_TXCLK_DISABLE |
3499                                             CLOCK_CTRL_44MHZ_CORE);
3500                         } else {
3501                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3502                         }
3503
3504                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3505                                     tp->pci_clock_ctrl | newbits3, 40);
3506                 }
3507         }
3508
3509         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3510                 tg3_power_down_phy(tp, do_low_power);
3511
3512         tg3_frob_aux_power(tp, true);
3513
3514         /* Workaround for unstable PLL clock */
3515         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3516             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3517                 u32 val = tr32(0x7d00);
3518
3519                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3520                 tw32(0x7d00, val);
3521                 if (!tg3_flag(tp, ENABLE_ASF)) {
3522                         int err;
3523
3524                         err = tg3_nvram_lock(tp);
3525                         tg3_halt_cpu(tp, RX_CPU_BASE);
3526                         if (!err)
3527                                 tg3_nvram_unlock(tp);
3528                 }
3529         }
3530
3531         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3532
3533         return 0;
3534 }
3535
3536 static void tg3_power_down(struct tg3 *tp)
3537 {
3538         tg3_power_down_prepare(tp);
3539
3540         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3541         pci_set_power_state(tp->pdev, PCI_D3hot);
3542 }
3543
3544 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3545 {
3546         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3547         case MII_TG3_AUX_STAT_10HALF:
3548                 *speed = SPEED_10;
3549                 *duplex = DUPLEX_HALF;
3550                 break;
3551
3552         case MII_TG3_AUX_STAT_10FULL:
3553                 *speed = SPEED_10;
3554                 *duplex = DUPLEX_FULL;
3555                 break;
3556
3557         case MII_TG3_AUX_STAT_100HALF:
3558                 *speed = SPEED_100;
3559                 *duplex = DUPLEX_HALF;
3560                 break;
3561
3562         case MII_TG3_AUX_STAT_100FULL:
3563                 *speed = SPEED_100;
3564                 *duplex = DUPLEX_FULL;
3565                 break;
3566
3567         case MII_TG3_AUX_STAT_1000HALF:
3568                 *speed = SPEED_1000;
3569                 *duplex = DUPLEX_HALF;
3570                 break;
3571
3572         case MII_TG3_AUX_STAT_1000FULL:
3573                 *speed = SPEED_1000;
3574                 *duplex = DUPLEX_FULL;
3575                 break;
3576
3577         default:
3578                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3579                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3580                                  SPEED_10;
3581                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3582                                   DUPLEX_HALF;
3583                         break;
3584                 }
3585                 *speed = SPEED_INVALID;
3586                 *duplex = DUPLEX_INVALID;
3587                 break;
3588         }
3589 }
3590
3591 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3592 {
3593         int err = 0;
3594         u32 val, new_adv;
3595
3596         new_adv = ADVERTISE_CSMA;
3597         if (advertise & ADVERTISED_10baseT_Half)
3598                 new_adv |= ADVERTISE_10HALF;
3599         if (advertise & ADVERTISED_10baseT_Full)
3600                 new_adv |= ADVERTISE_10FULL;
3601         if (advertise & ADVERTISED_100baseT_Half)
3602                 new_adv |= ADVERTISE_100HALF;
3603         if (advertise & ADVERTISED_100baseT_Full)
3604                 new_adv |= ADVERTISE_100FULL;
3605
3606         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3607
3608         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3609         if (err)
3610                 goto done;
3611
3612         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3613                 goto done;
3614
3615         new_adv = 0;
3616         if (advertise & ADVERTISED_1000baseT_Half)
3617                 new_adv |= ADVERTISE_1000HALF;
3618         if (advertise & ADVERTISED_1000baseT_Full)
3619                 new_adv |= ADVERTISE_1000FULL;
3620
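        /* 5701 A0/B0 force 1000BASE-T master mode here, presumably to
         * work around an early-revision erratum.
         */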
3621         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3622             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3623                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3624
3625         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3626         if (err)
3627                 goto done;
3628
3629         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3630                 goto done;
3631
3632         tw32(TG3_CPMU_EEE_MODE,
3633              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3634
3635         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3636         if (!err) {
3637                 int err2;
3638
3639                 val = 0;
3640                 /* Advertise 100-BaseTX EEE ability */
3641                 if (advertise & ADVERTISED_100baseT_Full)
3642                         val |= MDIO_AN_EEE_ADV_100TX;
3643                 /* Advertise 1000-BaseT EEE ability */
3644                 if (advertise & ADVERTISED_1000baseT_Full)
3645                         val |= MDIO_AN_EEE_ADV_1000T;
3646                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3647                 if (err)
3648                         val = 0;
3649
3650                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3651                 case ASIC_REV_5717:
3652                 case ASIC_REV_57765:
3653                 case ASIC_REV_5719:
3654                         /* If we advertised any EEE abilities above... */
3655                         if (val)
3656                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3657                                       MII_TG3_DSP_TAP26_RMRXSTO |
3658                                       MII_TG3_DSP_TAP26_OPCSINPT;
3659                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3660                         /* Fall through */
3661                 case ASIC_REV_5720:
3662                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3663                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3664                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3665                 }
3666
3667                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3668                 if (!err)
3669                         err = err2;
3670         }
3671
3672 done:
3673         return err;
3674 }
3675
3676 static void tg3_phy_copper_begin(struct tg3 *tp)
3677 {
3678         u32 new_adv;
3679         int i;
3680
3681         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3682                 new_adv = ADVERTISED_10baseT_Half |
3683                           ADVERTISED_10baseT_Full;
3684                 if (tg3_flag(tp, WOL_SPEED_100MB))
3685                         new_adv |= ADVERTISED_100baseT_Half |
3686                                    ADVERTISED_100baseT_Full;
3687
3688                 tg3_phy_autoneg_cfg(tp, new_adv,
3689                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3690         } else if (tp->link_config.speed == SPEED_INVALID) {
3691                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3692                         tp->link_config.advertising &=
3693                                 ~(ADVERTISED_1000baseT_Half |
3694                                   ADVERTISED_1000baseT_Full);
3695
3696                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3697                                     tp->link_config.flowctrl);
3698         } else {
3699                 /* Asking for a specific link mode. */
3700                 if (tp->link_config.speed == SPEED_1000) {
3701                         if (tp->link_config.duplex == DUPLEX_FULL)
3702                                 new_adv = ADVERTISED_1000baseT_Full;
3703                         else
3704                                 new_adv = ADVERTISED_1000baseT_Half;
3705                 } else if (tp->link_config.speed == SPEED_100) {
3706                         if (tp->link_config.duplex == DUPLEX_FULL)
3707                                 new_adv = ADVERTISED_100baseT_Full;
3708                         else
3709                                 new_adv = ADVERTISED_100baseT_Half;
3710                 } else {
3711                         if (tp->link_config.duplex == DUPLEX_FULL)
3712                                 new_adv = ADVERTISED_10baseT_Full;
3713                         else
3714                                 new_adv = ADVERTISED_10baseT_Half;
3715                 }
3716
3717                 tg3_phy_autoneg_cfg(tp, new_adv,
3718                                     tp->link_config.flowctrl);
3719         }
3720
3721         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3722             tp->link_config.speed != SPEED_INVALID) {
3723                 u32 bmcr, orig_bmcr;
3724
3725                 tp->link_config.active_speed = tp->link_config.speed;
3726                 tp->link_config.active_duplex = tp->link_config.duplex;
3727
3728                 bmcr = 0;
3729                 switch (tp->link_config.speed) {
3730                 default:
3731                 case SPEED_10:
3732                         break;
3733
3734                 case SPEED_100:
3735                         bmcr |= BMCR_SPEED100;
3736                         break;
3737
3738                 case SPEED_1000:
3739                         bmcr |= BMCR_SPEED1000;
3740                         break;
3741                 }
3742
3743                 if (tp->link_config.duplex == DUPLEX_FULL)
3744                         bmcr |= BMCR_FULLDPLX;
3745
3746                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3747                     (bmcr != orig_bmcr)) {
3748                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3749                         for (i = 0; i < 1500; i++) {
3750                                 u32 tmp;
3751
3752                                 udelay(10);
3753                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3754                                     tg3_readphy(tp, MII_BMSR, &tmp))
3755                                         continue;
3756                                 if (!(tmp & BMSR_LSTATUS)) {
3757                                         udelay(40);
3758                                         break;
3759                                 }
3760                         }
3761                         tg3_writephy(tp, MII_BMCR, bmcr);
3762                         udelay(40);
3763                 }
3764         } else {
3765                 tg3_writephy(tp, MII_BMCR,
3766                              BMCR_ANENABLE | BMCR_ANRESTART);
3767         }
3768 }
3769
3770 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3771 {
3772         int err;
3773
3774         /* Turn off tap power management and set the
3775          * extended packet length bit. */
3776         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3777
3778         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3779         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3780         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3781         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3782         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3783
3784         udelay(40);
3785
3786         return err;
3787 }
3788
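/* Return 1 if the PHY's advertisement registers exactly match the
 * ethtool ADVERTISED_* bits in @mask, 0 on any mismatch or read
 * failure.
 */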
3789 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3790 {
3791         u32 adv_reg, all_mask = 0;
3792
3793         if (mask & ADVERTISED_10baseT_Half)
3794                 all_mask |= ADVERTISE_10HALF;
3795         if (mask & ADVERTISED_10baseT_Full)
3796                 all_mask |= ADVERTISE_10FULL;
3797         if (mask & ADVERTISED_100baseT_Half)
3798                 all_mask |= ADVERTISE_100HALF;
3799         if (mask & ADVERTISED_100baseT_Full)
3800                 all_mask |= ADVERTISE_100FULL;
3801
3802         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3803                 return 0;
3804
3805         if ((adv_reg & ADVERTISE_ALL) != all_mask)
3806                 return 0;
3807
3808         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3809                 u32 tg3_ctrl;
3810
3811                 all_mask = 0;
3812                 if (mask & ADVERTISED_1000baseT_Half)
3813                         all_mask |= ADVERTISE_1000HALF;
3814                 if (mask & ADVERTISED_1000baseT_Full)
3815                         all_mask |= ADVERTISE_1000FULL;
3816
3817                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3818                         return 0;
3819
3820                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3821                 if (tg3_ctrl != all_mask)
3822                         return 0;
3823         }
3824
3825         return 1;
3826 }
3827
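/* Verify the negotiated pause configuration.  Returns 0 only when a
 * full-duplex link is up with a flow-control advertisement different
 * from the one requested; otherwise returns 1, reprogramming the
 * advertisement on half-duplex links as a side effect.
 */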
3828 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3829 {
3830         u32 curadv, reqadv;
3831
3832         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3833                 return 1;
3834
3835         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3836         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3837
3838         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3839                 if (curadv != reqadv)
3840                         return 0;
3841
3842                 if (tg3_flag(tp, PAUSE_AUTONEG))
3843                         tg3_readphy(tp, MII_LPA, rmtadv);
3844         } else {
3845                 /* Reprogram the advertisement register, even if it
3846                  * does not affect the current link.  If the link
3847                  * gets renegotiated in the future, we can save an
3848                  * additional renegotiation cycle by advertising
3849                  * it correctly in the first place.
3850                  */
3851                 if (curadv != reqadv) {
3852                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3853                                      ADVERTISE_PAUSE_ASYM);
3854                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3855                 }
3856         }
3857
3858         return 1;
3859 }
3860
3861 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3862 {
3863         int current_link_up;
3864         u32 bmsr, val;
3865         u32 lcl_adv, rmt_adv;
3866         u16 current_speed;
3867         u8 current_duplex;
3868         int i, err;
3869
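        /* Mask MAC event interrupts and clear any latched link-state
         * status bits before re-probing the PHY.
         */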
3870         tw32(MAC_EVENT, 0);
3871
3872         tw32_f(MAC_STATUS,
3873              (MAC_STATUS_SYNC_CHANGED |
3874               MAC_STATUS_CFG_CHANGED |
3875               MAC_STATUS_MI_COMPLETION |
3876               MAC_STATUS_LNKSTATE_CHANGED));
3877         udelay(40);
3878
3879         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3880                 tw32_f(MAC_MI_MODE,
3881                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3882                 udelay(80);
3883         }
3884
3885         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3886
3887         /* Some third-party PHYs need to be reset on link going
3888          * down.
3889          */
3890         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3891              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3892              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3893             netif_carrier_ok(tp->dev)) {
3894                 tg3_readphy(tp, MII_BMSR, &bmsr);
3895                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3896                     !(bmsr & BMSR_LSTATUS))
3897                         force_reset = 1;
3898         }
3899         if (force_reset)
3900                 tg3_phy_reset(tp);
3901
3902         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3903                 tg3_readphy(tp, MII_BMSR, &bmsr);
3904                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3905                     !tg3_flag(tp, INIT_COMPLETE))
3906                         bmsr = 0;
3907
3908                 if (!(bmsr & BMSR_LSTATUS)) {
3909                         err = tg3_init_5401phy_dsp(tp);
3910                         if (err)
3911                                 return err;
3912
3913                         tg3_readphy(tp, MII_BMSR, &bmsr);
3914                         for (i = 0; i < 1000; i++) {
3915                                 udelay(10);
3916                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3917                                     (bmsr & BMSR_LSTATUS)) {
3918                                         udelay(40);
3919                                         break;
3920                                 }
3921                         }
3922
3923                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3924                             TG3_PHY_REV_BCM5401_B0 &&
3925                             !(bmsr & BMSR_LSTATUS) &&
3926                             tp->link_config.active_speed == SPEED_1000) {
3927                                 err = tg3_phy_reset(tp);
3928                                 if (!err)
3929                                         err = tg3_init_5401phy_dsp(tp);
3930                                 if (err)
3931                                         return err;
3932                         }
3933                 }
3934         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3935                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3936                 /* 5701 {A0,B0} CRC bug workaround */
3937                 tg3_writephy(tp, 0x15, 0x0a75);
3938                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3939                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3940                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3941         }
3942
3943         /* Clear pending interrupts... */
3944         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3945         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3946
3947         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3948                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3949         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3950                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3951
3952         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3953             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3954                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3955                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3956                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3957                 else
3958                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3959         }
3960
3961         current_link_up = 0;
3962         current_speed = SPEED_INVALID;
3963         current_duplex = DUPLEX_INVALID;
3964
3965         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3966                 err = tg3_phy_auxctl_read(tp,
3967                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3968                                           &val);
3969                 if (!err && !(val & (1 << 10))) {
3970                         tg3_phy_auxctl_write(tp,
3971                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3972                                              val | (1 << 10));
3973                         goto relink;
3974                 }
3975         }
3976
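        /* BMSR latches link-down events; read it twice per iteration
         * so the second read reflects the current link state.
         */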
3977         bmsr = 0;
3978         for (i = 0; i < 100; i++) {
3979                 tg3_readphy(tp, MII_BMSR, &bmsr);
3980                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3981                     (bmsr & BMSR_LSTATUS))
3982                         break;
3983                 udelay(40);
3984         }
3985
3986         if (bmsr & BMSR_LSTATUS) {
3987                 u32 aux_stat, bmcr;
3988
3989                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3990                 for (i = 0; i < 2000; i++) {
3991                         udelay(10);
3992                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3993                             aux_stat)
3994                                 break;
3995                 }
3996
3997                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3998                                              &current_speed,
3999                                              &current_duplex);
4000
4001                 bmcr = 0;
4002                 for (i = 0; i < 200; i++) {
4003                         tg3_readphy(tp, MII_BMCR, &bmcr);
4004                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4005                                 continue;
4006                         if (bmcr && bmcr != 0x7fff)
4007                                 break;
4008                         udelay(10);
4009                 }
4010
4011                 lcl_adv = 0;
4012                 rmt_adv = 0;
4013
4014                 tp->link_config.active_speed = current_speed;
4015                 tp->link_config.active_duplex = current_duplex;
4016
4017                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4018                         if ((bmcr & BMCR_ANENABLE) &&
4019                             tg3_copper_is_advertising_all(tp,
4020                                                 tp->link_config.advertising)) {
4021                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
4022                                                                   &rmt_adv))
4023                                         current_link_up = 1;
4024                         }
4025                 } else {
4026                         if (!(bmcr & BMCR_ANENABLE) &&
4027                             tp->link_config.speed == current_speed &&
4028                             tp->link_config.duplex == current_duplex &&
4029                             tp->link_config.flowctrl ==
4030                             tp->link_config.active_flowctrl) {
4031                                 current_link_up = 1;
4032                         }
4033                 }
4034
4035                 if (current_link_up == 1 &&
4036                     tp->link_config.active_duplex == DUPLEX_FULL)
4037                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4038         }
4039
4040 relink:
4041         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4042                 tg3_phy_copper_begin(tp);
4043
4044                 tg3_readphy(tp, MII_BMSR, &bmsr);
4045                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4046                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4047                         current_link_up = 1;
4048         }
4049
4050         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4051         if (current_link_up == 1) {
4052                 if (tp->link_config.active_speed == SPEED_100 ||
4053                     tp->link_config.active_speed == SPEED_10)
4054                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4055                 else
4056                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4057         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4058                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4059         else
4060                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4061
4062         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4063         if (tp->link_config.active_duplex == DUPLEX_HALF)
4064                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4065
4066         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4067                 if (current_link_up == 1 &&
4068                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4069                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4070                 else
4071                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4072         }
4073
4074         /* Without this setting the Netgear GA302T PHY does not
4075          * send/receive packets; the exact reason is unknown...
4076          */
4077         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4078             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4079                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4080                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4081                 udelay(80);
4082         }
4083
4084         tw32_f(MAC_MODE, tp->mac_mode);
4085         udelay(40);
4086
4087         tg3_phy_eee_adjust(tp, current_link_up);
4088
4089         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4090                 /* Polled via timer. */
4091                 tw32_f(MAC_EVENT, 0);
4092         } else {
4093                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4094         }
4095         udelay(40);
4096
4097         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4098             current_link_up == 1 &&
4099             tp->link_config.active_speed == SPEED_1000 &&
4100             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4101                 udelay(120);
4102                 tw32_f(MAC_STATUS,
4103                      (MAC_STATUS_SYNC_CHANGED |
4104                       MAC_STATUS_CFG_CHANGED));
4105                 udelay(40);
4106                 tg3_write_mem(tp,
4107                               NIC_SRAM_FIRMWARE_MBOX,
4108                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4109         }
4110
4111         /* Prevent send BD corruption. */
4112         if (tg3_flag(tp, CLKREQ_BUG)) {
4113                 u16 oldlnkctl, newlnkctl;
4114
4115                 pci_read_config_word(tp->pdev,
4116                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4117                                      &oldlnkctl);
4118                 if (tp->link_config.active_speed == SPEED_100 ||
4119                     tp->link_config.active_speed == SPEED_10)
4120                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4121                 else
4122                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4123                 if (newlnkctl != oldlnkctl)
4124                         pci_write_config_word(tp->pdev,
4125                                               pci_pcie_cap(tp->pdev) +
4126                                               PCI_EXP_LNKCTL, newlnkctl);
4127         }
4128
4129         if (current_link_up != netif_carrier_ok(tp->dev)) {
4130                 if (current_link_up)
4131                         netif_carrier_on(tp->dev);
4132                 else
4133                         netif_carrier_off(tp->dev);
4134                 tg3_link_report(tp);
4135         }
4136
4137         return 0;
4138 }
4139
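/* Software autonegotiation state machine for 1000BASE-X fiber links;
 * the states below follow the IEEE 802.3 clause 37 arbitration flow.
 */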
4140 struct tg3_fiber_aneginfo {
4141         int state;
4142 #define ANEG_STATE_UNKNOWN              0
4143 #define ANEG_STATE_AN_ENABLE            1
4144 #define ANEG_STATE_RESTART_INIT         2
4145 #define ANEG_STATE_RESTART              3
4146 #define ANEG_STATE_DISABLE_LINK_OK      4
4147 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4148 #define ANEG_STATE_ABILITY_DETECT       6
4149 #define ANEG_STATE_ACK_DETECT_INIT      7
4150 #define ANEG_STATE_ACK_DETECT           8
4151 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4152 #define ANEG_STATE_COMPLETE_ACK         10
4153 #define ANEG_STATE_IDLE_DETECT_INIT     11
4154 #define ANEG_STATE_IDLE_DETECT          12
4155 #define ANEG_STATE_LINK_OK              13
4156 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4157 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4158
4159         u32 flags;
4160 #define MR_AN_ENABLE            0x00000001
4161 #define MR_RESTART_AN           0x00000002
4162 #define MR_AN_COMPLETE          0x00000004
4163 #define MR_PAGE_RX              0x00000008
4164 #define MR_NP_LOADED            0x00000010
4165 #define MR_TOGGLE_TX            0x00000020
4166 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4167 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4168 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4169 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4170 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4171 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4172 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4173 #define MR_TOGGLE_RX            0x00002000
4174 #define MR_NP_RX                0x00004000
4175
4176 #define MR_LINK_OK              0x80000000
4177
4178         unsigned long link_time, cur_time;
4179
4180         u32 ability_match_cfg;
4181         int ability_match_count;
4182
4183         char ability_match, idle_match, ack_match;
4184
4185         u32 txconfig, rxconfig;
4186 #define ANEG_CFG_NP             0x00000080
4187 #define ANEG_CFG_ACK            0x00000040
4188 #define ANEG_CFG_RF2            0x00000020
4189 #define ANEG_CFG_RF1            0x00000010
4190 #define ANEG_CFG_PS2            0x00000001
4191 #define ANEG_CFG_PS1            0x00008000
4192 #define ANEG_CFG_HD             0x00004000
4193 #define ANEG_CFG_FD             0x00002000
4194 #define ANEG_CFG_INVAL          0x00001f06
4195
4196 };
4197 #define ANEG_OK         0
4198 #define ANEG_DONE       1
4199 #define ANEG_TIMER_ENAB 2
4200 #define ANEG_FAILED     -1
4201
4202 #define ANEG_STATE_SETTLE_TIME  10000
4203
4204 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4205                                    struct tg3_fiber_aneginfo *ap)
4206 {
4207         u16 flowctrl;
4208         unsigned long delta;
4209         u32 rx_cfg_reg;
4210         int ret;
4211
4212         if (ap->state == ANEG_STATE_UNKNOWN) {
4213                 ap->rxconfig = 0;
4214                 ap->link_time = 0;
4215                 ap->cur_time = 0;
4216                 ap->ability_match_cfg = 0;
4217                 ap->ability_match_count = 0;
4218                 ap->ability_match = 0;
4219                 ap->idle_match = 0;
4220                 ap->ack_match = 0;
4221         }
4222         ap->cur_time++;
4223
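        /* Sample the received config word; it must be seen unchanged
         * on two consecutive passes before ability_match is asserted.
         */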
4224         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4225                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4226
4227                 if (rx_cfg_reg != ap->ability_match_cfg) {
4228                         ap->ability_match_cfg = rx_cfg_reg;
4229                         ap->ability_match = 0;
4230                         ap->ability_match_count = 0;
4231                 } else {
4232                         if (++ap->ability_match_count > 1) {
4233                                 ap->ability_match = 1;
4234                                 ap->ability_match_cfg = rx_cfg_reg;
4235                         }
4236                 }
4237                 if (rx_cfg_reg & ANEG_CFG_ACK)
4238                         ap->ack_match = 1;
4239                 else
4240                         ap->ack_match = 0;
4241
4242                 ap->idle_match = 0;
4243         } else {
4244                 ap->idle_match = 1;
4245                 ap->ability_match_cfg = 0;
4246                 ap->ability_match_count = 0;
4247                 ap->ability_match = 0;
4248                 ap->ack_match = 0;
4249
4250                 rx_cfg_reg = 0;
4251         }
4252
4253         ap->rxconfig = rx_cfg_reg;
4254         ret = ANEG_OK;
4255
4256         switch (ap->state) {
4257         case ANEG_STATE_UNKNOWN:
4258                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4259                         ap->state = ANEG_STATE_AN_ENABLE;
4260
4261                 /* fallthru */
4262         case ANEG_STATE_AN_ENABLE:
4263                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4264                 if (ap->flags & MR_AN_ENABLE) {
4265                         ap->link_time = 0;
4266                         ap->cur_time = 0;
4267                         ap->ability_match_cfg = 0;
4268                         ap->ability_match_count = 0;
4269                         ap->ability_match = 0;
4270                         ap->idle_match = 0;
4271                         ap->ack_match = 0;
4272
4273                         ap->state = ANEG_STATE_RESTART_INIT;
4274                 } else {
4275                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4276                 }
4277                 break;
4278
4279         case ANEG_STATE_RESTART_INIT:
4280                 ap->link_time = ap->cur_time;
4281                 ap->flags &= ~(MR_NP_LOADED);
4282                 ap->txconfig = 0;
4283                 tw32(MAC_TX_AUTO_NEG, 0);
4284                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4285                 tw32_f(MAC_MODE, tp->mac_mode);
4286                 udelay(40);
4287
4288                 ret = ANEG_TIMER_ENAB;
4289                 ap->state = ANEG_STATE_RESTART;
4290
4291                 /* fallthru */
4292         case ANEG_STATE_RESTART:
4293                 delta = ap->cur_time - ap->link_time;
4294                 if (delta > ANEG_STATE_SETTLE_TIME)
4295                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4296                 else
4297                         ret = ANEG_TIMER_ENAB;
4298                 break;
4299
4300         case ANEG_STATE_DISABLE_LINK_OK:
4301                 ret = ANEG_DONE;
4302                 break;
4303
4304         case ANEG_STATE_ABILITY_DETECT_INIT:
4305                 ap->flags &= ~(MR_TOGGLE_TX);
4306                 ap->txconfig = ANEG_CFG_FD;
4307                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4308                 if (flowctrl & ADVERTISE_1000XPAUSE)
4309                         ap->txconfig |= ANEG_CFG_PS1;
4310                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4311                         ap->txconfig |= ANEG_CFG_PS2;
4312                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4313                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4314                 tw32_f(MAC_MODE, tp->mac_mode);
4315                 udelay(40);
4316
4317                 ap->state = ANEG_STATE_ABILITY_DETECT;
4318                 break;
4319
4320         case ANEG_STATE_ABILITY_DETECT:
4321                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4322                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4323                 break;
4324
4325         case ANEG_STATE_ACK_DETECT_INIT:
4326                 ap->txconfig |= ANEG_CFG_ACK;
4327                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4328                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4329                 tw32_f(MAC_MODE, tp->mac_mode);
4330                 udelay(40);
4331
4332                 ap->state = ANEG_STATE_ACK_DETECT;
4333
4334                 /* fallthru */
4335         case ANEG_STATE_ACK_DETECT:
4336                 if (ap->ack_match != 0) {
4337                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4338                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4339                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4340                         } else {
4341                                 ap->state = ANEG_STATE_AN_ENABLE;
4342                         }
4343                 } else if (ap->ability_match != 0 &&
4344                            ap->rxconfig == 0) {
4345                         ap->state = ANEG_STATE_AN_ENABLE;
4346                 }
4347                 break;
4348
4349         case ANEG_STATE_COMPLETE_ACK_INIT:
4350                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4351                         ret = ANEG_FAILED;
4352                         break;
4353                 }
4354                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4355                                MR_LP_ADV_HALF_DUPLEX |
4356                                MR_LP_ADV_SYM_PAUSE |
4357                                MR_LP_ADV_ASYM_PAUSE |
4358                                MR_LP_ADV_REMOTE_FAULT1 |
4359                                MR_LP_ADV_REMOTE_FAULT2 |
4360                                MR_LP_ADV_NEXT_PAGE |
4361                                MR_TOGGLE_RX |
4362                                MR_NP_RX);
4363                 if (ap->rxconfig & ANEG_CFG_FD)
4364                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4365                 if (ap->rxconfig & ANEG_CFG_HD)
4366                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4367                 if (ap->rxconfig & ANEG_CFG_PS1)
4368                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4369                 if (ap->rxconfig & ANEG_CFG_PS2)
4370                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4371                 if (ap->rxconfig & ANEG_CFG_RF1)
4372                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4373                 if (ap->rxconfig & ANEG_CFG_RF2)
4374                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4375                 if (ap->rxconfig & ANEG_CFG_NP)
4376                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4377
4378                 ap->link_time = ap->cur_time;
4379
4380                 ap->flags ^= (MR_TOGGLE_TX);
4381                 if (ap->rxconfig & 0x0008)
4382                         ap->flags |= MR_TOGGLE_RX;
4383                 if (ap->rxconfig & ANEG_CFG_NP)
4384                         ap->flags |= MR_NP_RX;
4385                 ap->flags |= MR_PAGE_RX;
4386
4387                 ap->state = ANEG_STATE_COMPLETE_ACK;
4388                 ret = ANEG_TIMER_ENAB;
4389                 break;
4390
4391         case ANEG_STATE_COMPLETE_ACK:
4392                 if (ap->ability_match != 0 &&
4393                     ap->rxconfig == 0) {
4394                         ap->state = ANEG_STATE_AN_ENABLE;
4395                         break;
4396                 }
4397                 delta = ap->cur_time - ap->link_time;
4398                 if (delta > ANEG_STATE_SETTLE_TIME) {
4399                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4400                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4401                         } else {
4402                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4403                                     !(ap->flags & MR_NP_RX)) {
4404                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4405                                 } else {
4406                                         ret = ANEG_FAILED;
4407                                 }
4408                         }
4409                 }
4410                 break;
4411
4412         case ANEG_STATE_IDLE_DETECT_INIT:
4413                 ap->link_time = ap->cur_time;
4414                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4415                 tw32_f(MAC_MODE, tp->mac_mode);
4416                 udelay(40);
4417
4418                 ap->state = ANEG_STATE_IDLE_DETECT;
4419                 ret = ANEG_TIMER_ENAB;
4420                 break;
4421
4422         case ANEG_STATE_IDLE_DETECT:
4423                 if (ap->ability_match != 0 &&
4424                     ap->rxconfig == 0) {
4425                         ap->state = ANEG_STATE_AN_ENABLE;
4426                         break;
4427                 }
4428                 delta = ap->cur_time - ap->link_time;
4429                 if (delta > ANEG_STATE_SETTLE_TIME) {
4430                         /* XXX as in the Broadcom driver: no idle check here; just declare the link OK after the settle time. :( */
4431                         ap->state = ANEG_STATE_LINK_OK;
4432                 }
4433                 break;
4434
4435         case ANEG_STATE_LINK_OK:
4436                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4437                 ret = ANEG_DONE;
4438                 break;
4439
4440         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4441                 /* ??? unimplemented */
4442                 break;
4443
4444         case ANEG_STATE_NEXT_PAGE_WAIT:
4445                 /* ??? unimplemented */
4446                 break;
4447
4448         default:
4449                 ret = ANEG_FAILED;
4450                 break;
4451         }
4452
4453         return ret;
4454 }
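/* A minimal sketch of the polling contract the state machine above
 * assumes (see fiber_autoneg() below for the real caller): ANEG_OK and
 * ANEG_TIMER_ENAB both mean "tick again"; only ANEG_DONE and
 * ANEG_FAILED are terminal.
 *
 *	do {
 *		status = tg3_fiber_aneg_smachine(tp, &aninfo);
 *		udelay(1);
 *	} while (status != ANEG_DONE && status != ANEG_FAILED);
 */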
4455
4456 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4457 {
4458         int res = 0;
4459         struct tg3_fiber_aneginfo aninfo;
4460         int status = ANEG_FAILED;
4461         unsigned int tick;
4462         u32 tmp;
4463
4464         tw32_f(MAC_TX_AUTO_NEG, 0);
4465
4466         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4467         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4468         udelay(40);
4469
4470         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4471         udelay(40);
4472
4473         memset(&aninfo, 0, sizeof(aninfo));
4474         aninfo.flags |= MR_AN_ENABLE;
4475         aninfo.state = ANEG_STATE_UNKNOWN;
4476         aninfo.cur_time = 0;
4477         tick = 0;
4478         while (++tick < 195000) {
4479                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4480                 if (status == ANEG_DONE || status == ANEG_FAILED)
4481                         break;
4482
4483                 udelay(1);
4484         }
4485
4486         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4487         tw32_f(MAC_MODE, tp->mac_mode);
4488         udelay(40);
4489
4490         *txflags = aninfo.txconfig;
4491         *rxflags = aninfo.flags;
4492
4493         if (status == ANEG_DONE &&
4494             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4495                              MR_LP_ADV_FULL_DUPLEX)))
4496                 res = 1;
4497
4498         return res;
4499 }
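/* Back-of-the-envelope timing for the loop above: 195000 ticks with a
 * udelay(1) per tick bounds one autoneg attempt to roughly 195 ms of
 * busy-waiting, plus per-tick register access overhead.
 */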
4500
4501 static void tg3_init_bcm8002(struct tg3 *tp)
4502 {
4503         u32 mac_status = tr32(MAC_STATUS);
4504         int i;
4505
4506         /* Reset when initializing for the first time, or when we have link. */
4507         if (tg3_flag(tp, INIT_COMPLETE) &&
4508             !(mac_status & MAC_STATUS_PCS_SYNCED))
4509                 return;
4510
4511         /* Set PLL lock range. */
4512         tg3_writephy(tp, 0x16, 0x8007);
4513
4514         /* SW reset */
4515         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4516
4517         /* Wait for reset to complete. */
4518         /* XXX schedule_timeout() ... */
4519         for (i = 0; i < 500; i++)
4520                 udelay(10);
4521
4522         /* Config mode; select PMA/Ch 1 regs. */
4523         tg3_writephy(tp, 0x10, 0x8411);
4524
4525         /* Enable auto-lock and comdet, select txclk for tx. */
4526         tg3_writephy(tp, 0x11, 0x0a10);
4527
4528         tg3_writephy(tp, 0x18, 0x00a0);
4529         tg3_writephy(tp, 0x16, 0x41ff);
4530
4531         /* Assert and deassert POR. */
4532         tg3_writephy(tp, 0x13, 0x0400);
4533         udelay(40);
4534         tg3_writephy(tp, 0x13, 0x0000);
4535
4536         tg3_writephy(tp, 0x11, 0x0a50);
4537         udelay(40);
4538         tg3_writephy(tp, 0x11, 0x0a10);
4539
4540         /* Wait for signal to stabilize */
4541         /* XXX schedule_timeout() ... */
4542         for (i = 0; i < 15000; i++)
4543                 udelay(10);
4544
4545         /* Deselect the channel register so we can read the PHYID
4546          * later.
4547          */
4548         tg3_writephy(tp, 0x10, 0x8011);
4549 }
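/* Timing note on the busy-waits above: 500 * udelay(10) is roughly
 * 5 ms for the software reset and 15000 * udelay(10) roughly 150 ms
 * for signal stabilization; as the XXX comments say, schedule_timeout()
 * would be preferable wherever sleeping is allowed.
 */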
4550
4551 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4552 {
4553         u16 flowctrl;
4554         u32 sg_dig_ctrl, sg_dig_status;
4555         u32 serdes_cfg, expected_sg_dig_ctrl;
4556         int workaround, port_a;
4557         int current_link_up;
4558
4559         serdes_cfg = 0;
4560         expected_sg_dig_ctrl = 0;
4561         workaround = 0;
4562         port_a = 1;
4563         current_link_up = 0;
4564
4565         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4566             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4567                 workaround = 1;
4568                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4569                         port_a = 0;
4570
4571                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4572                 /* preserve bits 20-23 for voltage regulator */
4573                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4574         }
4575
4576         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4577
4578         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4579                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4580                         if (workaround) {
4581                                 u32 val = serdes_cfg;
4582
4583                                 if (port_a)
4584                                         val |= 0xc010000;
4585                                 else
4586                                         val |= 0x4010000;
4587                                 tw32_f(MAC_SERDES_CFG, val);
4588                         }
4589
4590                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4591                 }
4592                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4593                         tg3_setup_flow_control(tp, 0, 0);
4594                         current_link_up = 1;
4595                 }
4596                 goto out;
4597         }
4598
4599         /* Want auto-negotiation.  */
4600         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4601
4602         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4603         if (flowctrl & ADVERTISE_1000XPAUSE)
4604                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4605         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4606                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4607
4608         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4609                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4610                     tp->serdes_counter &&
4611                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4612                                     MAC_STATUS_RCVD_CFG)) ==
4613                      MAC_STATUS_PCS_SYNCED)) {
4614                         tp->serdes_counter--;
4615                         current_link_up = 1;
4616                         goto out;
4617                 }
4618 restart_autoneg:
4619                 if (workaround)
4620                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4621                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4622                 udelay(5);
4623                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4624
4625                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4626                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4627         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4628                                  MAC_STATUS_SIGNAL_DET)) {
4629                 sg_dig_status = tr32(SG_DIG_STATUS);
4630                 mac_status = tr32(MAC_STATUS);
4631
4632                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4633                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4634                         u32 local_adv = 0, remote_adv = 0;
4635
4636                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4637                                 local_adv |= ADVERTISE_1000XPAUSE;
4638                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4639                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4640
4641                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4642                                 remote_adv |= LPA_1000XPAUSE;
4643                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4644                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4645
4646                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4647                         current_link_up = 1;
4648                         tp->serdes_counter = 0;
4649                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4650                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4651                         if (tp->serdes_counter)
4652                                 tp->serdes_counter--;
4653                         else {
4654                                 if (workaround) {
4655                                         u32 val = serdes_cfg;
4656
4657                                         if (port_a)
4658                                                 val |= 0xc010000;
4659                                         else
4660                                                 val |= 0x4010000;
4661
4662                                         tw32_f(MAC_SERDES_CFG, val);
4663                                 }
4664
4665                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4666                                 udelay(40);
4667
4668                                 /* Link parallel detection: the link is up
4669                                  * only if we have PCS_SYNC and are not
4670                                  * receiving config code words.  */
4671                                 mac_status = tr32(MAC_STATUS);
4672                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4673                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4674                                         tg3_setup_flow_control(tp, 0, 0);
4675                                         current_link_up = 1;
4676                                         tp->phy_flags |=
4677                                                 TG3_PHYFLG_PARALLEL_DETECT;
4678                                         tp->serdes_counter =
4679                                                 SERDES_PARALLEL_DET_TIMEOUT;
4680                                 } else
4681                                         goto restart_autoneg;
4682                         }
4683                 }
4684         } else {
4685                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4686                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4687         }
4688
4689 out:
4690         return current_link_up;
4691 }
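/* Worked example of the pause resolution above (illustrative values):
 * if SG_DIG_PAUSE_CAP was advertised locally and the partner reports
 * SG_DIG_PARTNER_PAUSE_CAPABLE, tg3_setup_flow_control() is handed
 * local_adv = ADVERTISE_1000XPAUSE and remote_adv = LPA_1000XPAUSE,
 * which would typically resolve to symmetric (TX and RX) flow control.
 */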
4692
4693 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4694 {
4695         int current_link_up = 0;
4696
4697         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4698                 goto out;
4699
4700         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4701                 u32 txflags, rxflags;
4702                 int i;
4703
4704                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4705                         u32 local_adv = 0, remote_adv = 0;
4706
4707                         if (txflags & ANEG_CFG_PS1)
4708                                 local_adv |= ADVERTISE_1000XPAUSE;
4709                         if (txflags & ANEG_CFG_PS2)
4710                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4711
4712                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4713                                 remote_adv |= LPA_1000XPAUSE;
4714                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4715                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4716
4717                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4718
4719                         current_link_up = 1;
4720                 }
4721                 for (i = 0; i < 30; i++) {
4722                         udelay(20);
4723                         tw32_f(MAC_STATUS,
4724                                (MAC_STATUS_SYNC_CHANGED |
4725                                 MAC_STATUS_CFG_CHANGED));
4726                         udelay(40);
4727                         if ((tr32(MAC_STATUS) &
4728                              (MAC_STATUS_SYNC_CHANGED |
4729                               MAC_STATUS_CFG_CHANGED)) == 0)
4730                                 break;
4731                 }
4732
4733                 mac_status = tr32(MAC_STATUS);
4734                 if (current_link_up == 0 &&
4735                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4736                     !(mac_status & MAC_STATUS_RCVD_CFG))
4737                         current_link_up = 1;
4738         } else {
4739                 tg3_setup_flow_control(tp, 0, 0);
4740
4741                 /* Forcing 1000FD link up. */
4742                 current_link_up = 1;
4743
4744                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4745                 udelay(40);
4746
4747                 tw32_f(MAC_MODE, tp->mac_mode);
4748                 udelay(40);
4749         }
4750
4751 out:
4752         return current_link_up;
4753 }
4754
4755 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4756 {
4757         u32 orig_pause_cfg;
4758         u16 orig_active_speed;
4759         u8 orig_active_duplex;
4760         u32 mac_status;
4761         int current_link_up;
4762         int i;
4763
4764         orig_pause_cfg = tp->link_config.active_flowctrl;
4765         orig_active_speed = tp->link_config.active_speed;
4766         orig_active_duplex = tp->link_config.active_duplex;
4767
4768         if (!tg3_flag(tp, HW_AUTONEG) &&
4769             netif_carrier_ok(tp->dev) &&
4770             tg3_flag(tp, INIT_COMPLETE)) {
4771                 mac_status = tr32(MAC_STATUS);
4772                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4773                                MAC_STATUS_SIGNAL_DET |
4774                                MAC_STATUS_CFG_CHANGED |
4775                                MAC_STATUS_RCVD_CFG);
4776                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4777                                    MAC_STATUS_SIGNAL_DET)) {
4778                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4779                                             MAC_STATUS_CFG_CHANGED));
4780                         return 0;
4781                 }
4782         }
4783
4784         tw32_f(MAC_TX_AUTO_NEG, 0);
4785
4786         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4787         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4788         tw32_f(MAC_MODE, tp->mac_mode);
4789         udelay(40);
4790
4791         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4792                 tg3_init_bcm8002(tp);
4793
4794         /* Enable link change event even when serdes polling.  */
4795         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4796         udelay(40);
4797
4798         current_link_up = 0;
4799         mac_status = tr32(MAC_STATUS);
4800
4801         if (tg3_flag(tp, HW_AUTONEG))
4802                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4803         else
4804                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4805
4806         tp->napi[0].hw_status->status =
4807                 (SD_STATUS_UPDATED |
4808                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4809
4810         for (i = 0; i < 100; i++) {
4811                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4812                                     MAC_STATUS_CFG_CHANGED));
4813                 udelay(5);
4814                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4815                                          MAC_STATUS_CFG_CHANGED |
4816                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4817                         break;
4818         }
4819
4820         mac_status = tr32(MAC_STATUS);
4821         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4822                 current_link_up = 0;
4823                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4824                     tp->serdes_counter == 0) {
4825                         tw32_f(MAC_MODE, (tp->mac_mode |
4826                                           MAC_MODE_SEND_CONFIGS));
4827                         udelay(1);
4828                         tw32_f(MAC_MODE, tp->mac_mode);
4829                 }
4830         }
4831
4832         if (current_link_up == 1) {
4833                 tp->link_config.active_speed = SPEED_1000;
4834                 tp->link_config.active_duplex = DUPLEX_FULL;
4835                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4836                                     LED_CTRL_LNKLED_OVERRIDE |
4837                                     LED_CTRL_1000MBPS_ON));
4838         } else {
4839                 tp->link_config.active_speed = SPEED_INVALID;
4840                 tp->link_config.active_duplex = DUPLEX_INVALID;
4841                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4842                                     LED_CTRL_LNKLED_OVERRIDE |
4843                                     LED_CTRL_TRAFFIC_OVERRIDE));
4844         }
4845
4846         if (current_link_up != netif_carrier_ok(tp->dev)) {
4847                 if (current_link_up)
4848                         netif_carrier_on(tp->dev);
4849                 else
4850                         netif_carrier_off(tp->dev);
4851                 tg3_link_report(tp);
4852         } else {
4853                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4854                 if (orig_pause_cfg != now_pause_cfg ||
4855                     orig_active_speed != tp->link_config.active_speed ||
4856                     orig_active_duplex != tp->link_config.active_duplex)
4857                         tg3_link_report(tp);
4858         }
4859
4860         return 0;
4861 }
4862
4863 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4864 {
4865         int current_link_up, err = 0;
4866         u32 bmsr, bmcr;
4867         u16 current_speed;
4868         u8 current_duplex;
4869         u32 local_adv, remote_adv;
4870
4871         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4872         tw32_f(MAC_MODE, tp->mac_mode);
4873         udelay(40);
4874
4875         tw32(MAC_EVENT, 0);
4876
4877         tw32_f(MAC_STATUS,
4878              (MAC_STATUS_SYNC_CHANGED |
4879               MAC_STATUS_CFG_CHANGED |
4880               MAC_STATUS_MI_COMPLETION |
4881               MAC_STATUS_LNKSTATE_CHANGED));
4882         udelay(40);
4883
4884         if (force_reset)
4885                 tg3_phy_reset(tp);
4886
4887         current_link_up = 0;
4888         current_speed = SPEED_INVALID;
4889         current_duplex = DUPLEX_INVALID;
4890
4891         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4892         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4893         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4894                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4895                         bmsr |= BMSR_LSTATUS;
4896                 else
4897                         bmsr &= ~BMSR_LSTATUS;
4898         }
4899
4900         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4901
4902         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4903             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4904                 /* do nothing, just check for link up at the end */
4905         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4906                 u32 adv, new_adv;
4907
4908                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4909                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4910                                   ADVERTISE_1000XPAUSE |
4911                                   ADVERTISE_1000XPSE_ASYM |
4912                                   ADVERTISE_SLCT);
4913
4914                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4915
4916                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4917                         new_adv |= ADVERTISE_1000XHALF;
4918                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4919                         new_adv |= ADVERTISE_1000XFULL;
4920
4921                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4922                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4923                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4924                         tg3_writephy(tp, MII_BMCR, bmcr);
4925
4926                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4927                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4928                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4929
4930                         return err;
4931                 }
4932         } else {
4933                 u32 new_bmcr;
4934
4935                 bmcr &= ~BMCR_SPEED1000;
4936                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4937
4938                 if (tp->link_config.duplex == DUPLEX_FULL)
4939                         new_bmcr |= BMCR_FULLDPLX;
4940
4941                 if (new_bmcr != bmcr) {
4942                         /* BMCR_SPEED1000 is a reserved bit that needs
4943                          * to be set on write.
4944                          */
4945                         new_bmcr |= BMCR_SPEED1000;
4946
4947                         /* Force a linkdown */
4948                         if (netif_carrier_ok(tp->dev)) {
4949                                 u32 adv;
4950
4951                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4952                                 adv &= ~(ADVERTISE_1000XFULL |
4953                                          ADVERTISE_1000XHALF |
4954                                          ADVERTISE_SLCT);
4955                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4956                                 tg3_writephy(tp, MII_BMCR, bmcr |
4957                                                            BMCR_ANRESTART |
4958                                                            BMCR_ANENABLE);
4959                                 udelay(10);
4960                                 netif_carrier_off(tp->dev);
4961                         }
4962                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4963                         bmcr = new_bmcr;
4964                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4965                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4966                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4967                             ASIC_REV_5714) {
4968                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4969                                         bmsr |= BMSR_LSTATUS;
4970                                 else
4971                                         bmsr &= ~BMSR_LSTATUS;
4972                         }
4973                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4974                 }
4975         }
4976
4977         if (bmsr & BMSR_LSTATUS) {
4978                 current_speed = SPEED_1000;
4979                 current_link_up = 1;
4980                 if (bmcr & BMCR_FULLDPLX)
4981                         current_duplex = DUPLEX_FULL;
4982                 else
4983                         current_duplex = DUPLEX_HALF;
4984
4985                 local_adv = 0;
4986                 remote_adv = 0;
4987
4988                 if (bmcr & BMCR_ANENABLE) {
4989                         u32 common;
4990
4991                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4992                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4993                         common = local_adv & remote_adv;
4994                         if (common & (ADVERTISE_1000XHALF |
4995                                       ADVERTISE_1000XFULL)) {
4996                                 if (common & ADVERTISE_1000XFULL)
4997                                         current_duplex = DUPLEX_FULL;
4998                                 else
4999                                         current_duplex = DUPLEX_HALF;
5000                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5001                                 /* Link is up via parallel detect */
5002                         } else {
5003                                 current_link_up = 0;
5004                         }
5005                 }
5006         }
5007
5008         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5009                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5010
5011         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5012         if (tp->link_config.active_duplex == DUPLEX_HALF)
5013                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5014
5015         tw32_f(MAC_MODE, tp->mac_mode);
5016         udelay(40);
5017
5018         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5019
5020         tp->link_config.active_speed = current_speed;
5021         tp->link_config.active_duplex = current_duplex;
5022
5023         if (current_link_up != netif_carrier_ok(tp->dev)) {
5024                 if (current_link_up)
5025                         netif_carrier_on(tp->dev);
5026                 else {
5027                         netif_carrier_off(tp->dev);
5028                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5029                 }
5030                 tg3_link_report(tp);
5031         }
5032         return err;
5033 }
5034
5035 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5036 {
5037         if (tp->serdes_counter) {
5038                 /* Give autoneg time to complete. */
5039                 tp->serdes_counter--;
5040                 return;
5041         }
5042
5043         if (!netif_carrier_ok(tp->dev) &&
5044             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5045                 u32 bmcr;
5046
5047                 tg3_readphy(tp, MII_BMCR, &bmcr);
5048                 if (bmcr & BMCR_ANENABLE) {
5049                         u32 phy1, phy2;
5050
5051                         /* Select shadow register 0x1f */
5052                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5053                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5054
5055                         /* Select expansion interrupt status register */
5056                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5057                                          MII_TG3_DSP_EXP1_INT_STAT);
5058                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5059                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5060
5061                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5062                                 /* We have signal detect and not receiving
5063                                  * config code words, link is up by parallel
5064                                  * detection.
5065                                  */
5066
5067                                 bmcr &= ~BMCR_ANENABLE;
5068                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5069                                 tg3_writephy(tp, MII_BMCR, bmcr);
5070                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5071                         }
5072                 }
5073         } else if (netif_carrier_ok(tp->dev) &&
5074                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5075                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5076                 u32 phy2;
5077
5078                 /* Select expansion interrupt status register */
5079                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5080                                  MII_TG3_DSP_EXP1_INT_STAT);
5081                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5082                 if (phy2 & 0x20) {
5083                         u32 bmcr;
5084
5085                         /* Config code words received, turn on autoneg. */
5086                         tg3_readphy(tp, MII_BMCR, &bmcr);
5087                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5088
5089                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5090
5091                 }
5092         }
5093 }
5094
5095 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5096 {
5097         u32 val;
5098         int err;
5099
5100         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5101                 err = tg3_setup_fiber_phy(tp, force_reset);
5102         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5103                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5104         else
5105                 err = tg3_setup_copper_phy(tp, force_reset);
5106
5107         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5108                 u32 scale;
5109
5110                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5111                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5112                         scale = 65;
5113                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5114                         scale = 6;
5115                 else
5116                         scale = 12;
5117
5118                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5119                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5120                 tw32(GRC_MISC_CFG, val);
5121         }
5122
5123         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5124               (6 << TX_LENGTHS_IPG_SHIFT);
5125         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5126                 val |= tr32(MAC_TX_LENGTHS) &
5127                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5128                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5129
5130         if (tp->link_config.active_speed == SPEED_1000 &&
5131             tp->link_config.active_duplex == DUPLEX_HALF)
5132                 tw32(MAC_TX_LENGTHS, val |
5133                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5134         else
5135                 tw32(MAC_TX_LENGTHS, val |
5136                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5137
5138         if (!tg3_flag(tp, 5705_PLUS)) {
5139                 if (netif_carrier_ok(tp->dev)) {
5140                         tw32(HOSTCC_STAT_COAL_TICKS,
5141                              tp->coal.stats_block_coalesce_usecs);
5142                 } else {
5143                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5144                 }
5145         }
5146
5147         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5148                 val = tr32(PCIE_PWR_MGMT_THRESH);
5149                 if (!netif_carrier_ok(tp->dev))
5150                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5151                               tp->pwrmgmt_thresh;
5152                 else
5153                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5154                 tw32(PCIE_PWR_MGMT_THRESH, val);
5155         }
5156
5157         return err;
5158 }
5159
5160 static inline int tg3_irq_sync(struct tg3 *tp)
5161 {
5162         return tp->irq_sync;
5163 }
5164
5165 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5166 {
5167         int i;
5168
5169         dst = (u32 *)((u8 *)dst + off);
5170         for (i = 0; i < len; i += sizeof(u32))
5171                 *dst++ = tr32(off + i);
5172 }
5173
5174 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5175 {
5176         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5177         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5178         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5179         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5180         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5181         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5182         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5183         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5184         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5185         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5186         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5187         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5188         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5189         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5190         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5191         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5192         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5193         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5194         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5195
5196         if (tg3_flag(tp, SUPPORT_MSIX))
5197                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5198
5199         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5200         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5201         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5202         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5203         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5204         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5205         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5206         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5207
5208         if (!tg3_flag(tp, 5705_PLUS)) {
5209                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5210                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5211                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5212         }
5213
5214         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5215         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5216         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5217         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5218         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5219
5220         if (tg3_flag(tp, NVRAM))
5221                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5222 }
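/* Note on the helper above: each tg3_rd32_loop() length is in bytes,
 * so e.g. the 0xb0 block at TG3PCI_VENDOR captures 44 consecutive
 * 32-bit registers, and the destination pointer is advanced by the
 * same offset so that buffer positions mirror register offsets.
 */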
5223
5224 static void tg3_dump_state(struct tg3 *tp)
5225 {
5226         int i;
5227         u32 *regs;
5228
5229         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5230         if (!regs) {
5231                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5232                 return;
5233         }
5234
5235         if (tg3_flag(tp, PCI_EXPRESS)) {
5236                 /* Read up to but not including private PCI registers */
5237                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5238                         regs[i / sizeof(u32)] = tr32(i);
5239         } else
5240                 tg3_dump_legacy_regs(tp, regs);
5241
5242         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5243                 if (!regs[i + 0] && !regs[i + 1] &&
5244                     !regs[i + 2] && !regs[i + 3])
5245                         continue;
5246
5247                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5248                            i * 4,
5249                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5250         }
5251
5252         kfree(regs);
5253
5254         for (i = 0; i < tp->irq_cnt; i++) {
5255                 struct tg3_napi *tnapi = &tp->napi[i];
5256
5257                 /* SW status block */
5258                 netdev_err(tp->dev,
5259                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5260                            i,
5261                            tnapi->hw_status->status,
5262                            tnapi->hw_status->status_tag,
5263                            tnapi->hw_status->rx_jumbo_consumer,
5264                            tnapi->hw_status->rx_consumer,
5265                            tnapi->hw_status->rx_mini_consumer,
5266                            tnapi->hw_status->idx[0].rx_producer,
5267                            tnapi->hw_status->idx[0].tx_consumer);
5268
5269                 netdev_err(tp->dev,
5270                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5271                            i,
5272                            tnapi->last_tag, tnapi->last_irq_tag,
5273                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5274                            tnapi->rx_rcb_ptr,
5275                            tnapi->prodring.rx_std_prod_idx,
5276                            tnapi->prodring.rx_std_cons_idx,
5277                            tnapi->prodring.rx_jmb_prod_idx,
5278                            tnapi->prodring.rx_jmb_cons_idx);
5279         }
5280 }
5281
5282 /* This is called whenever we suspect that the system chipset is re-
5283  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5284  * is bogus tx completions. We try to recover by setting the
5285  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5286  * in the workqueue.
5287  */
5288 static void tg3_tx_recover(struct tg3 *tp)
5289 {
5290         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5291                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5292
5293         netdev_warn(tp->dev,
5294                     "The system may be re-ordering memory-mapped I/O "
5295                     "cycles to the network device, attempting to recover. "
5296                     "Please report the problem to the driver maintainer "
5297                     "and include system chipset information.\n");
5298
5299         spin_lock(&tp->lock);
5300         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5301         spin_unlock(&tp->lock);
5302 }
5303
5304 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5305 {
5306         /* Tell compiler to fetch tx indices from memory. */
5307         barrier();
5308         return tnapi->tx_pending -
5309                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5310 }
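/* Illustrative arithmetic, assuming the default tx_pending of 511 and
 * the 512-entry TX ring: with tx_prod = 10 and tx_cons = 500 the ring
 * holds (10 - 500) & 511 = 22 in-flight descriptors, so tg3_tx_avail()
 * reports 511 - 22 = 489 free slots.
 */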
5311
5312 /* Tigon3 never reports partial packet sends.  So we do not
5313  * need special logic to handle SKBs that have not had all
5314  * of their frags sent yet, like SunGEM does.
5315  */
5316 static void tg3_tx(struct tg3_napi *tnapi)
5317 {
5318         struct tg3 *tp = tnapi->tp;
5319         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5320         u32 sw_idx = tnapi->tx_cons;
5321         struct netdev_queue *txq;
5322         int index = tnapi - tp->napi;
5323
5324         if (tg3_flag(tp, ENABLE_TSS))
5325                 index--;
5326
5327         txq = netdev_get_tx_queue(tp->dev, index);
5328
5329         while (sw_idx != hw_idx) {
5330                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5331                 struct sk_buff *skb = ri->skb;
5332                 int i, tx_bug = 0;
5333
5334                 if (unlikely(skb == NULL)) {
5335                         tg3_tx_recover(tp);
5336                         return;
5337                 }
5338
5339                 pci_unmap_single(tp->pdev,
5340                                  dma_unmap_addr(ri, mapping),
5341                                  skb_headlen(skb),
5342                                  PCI_DMA_TODEVICE);
5343
5344                 ri->skb = NULL;
5345
5346                 while (ri->fragmented) {
5347                         ri->fragmented = false;
5348                         sw_idx = NEXT_TX(sw_idx);
5349                         ri = &tnapi->tx_buffers[sw_idx];
5350                 }
5351
5352                 sw_idx = NEXT_TX(sw_idx);
5353
5354                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5355                         ri = &tnapi->tx_buffers[sw_idx];
5356                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5357                                 tx_bug = 1;
5358
5359                         pci_unmap_page(tp->pdev,
5360                                        dma_unmap_addr(ri, mapping),
5361                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5362                                        PCI_DMA_TODEVICE);
5363
5364                         while (ri->fragmented) {
5365                                 ri->fragmented = false;
5366                                 sw_idx = NEXT_TX(sw_idx);
5367                                 ri = &tnapi->tx_buffers[sw_idx];
5368                         }
5369
5370                         sw_idx = NEXT_TX(sw_idx);
5371                 }
5372
5373                 dev_kfree_skb(skb);
5374
5375                 if (unlikely(tx_bug)) {
5376                         tg3_tx_recover(tp);
5377                         return;
5378                 }
5379         }
5380
5381         tnapi->tx_cons = sw_idx;
5382
5383         /* Need to make the tx_cons update visible to tg3_start_xmit()
5384          * before checking for netif_queue_stopped().  Without the
5385          * memory barrier, there is a small possibility that tg3_start_xmit()
5386          * will miss it and cause the queue to be stopped forever.
5387          */
5388         smp_mb();
5389
5390         if (unlikely(netif_tx_queue_stopped(txq) &&
5391                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5392                 __netif_tx_lock(txq, smp_processor_id());
5393                 if (netif_tx_queue_stopped(txq) &&
5394                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5395                         netif_tx_wake_queue(txq);
5396                 __netif_tx_unlock(txq);
5397         }
5398 }
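/* The smp_mb() in tg3_tx() pairs with a matching barrier in the
 * queue-stop path of tg3_start_xmit() (not part of this excerpt):
 * the producer must observe the tx_cons update before it re-checks
 * ring availability, otherwise the wakeup could be lost and the
 * queue stopped forever.
 */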
5399
5400 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5401 {
5402         if (!ri->skb)
5403                 return;
5404
5405         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5406                          map_sz, PCI_DMA_FROMDEVICE);
5407         dev_kfree_skb_any(ri->skb);
5408         ri->skb = NULL;
5409 }
5410
5411 /* Returns size of skb allocated or < 0 on error.
5412  *
5413  * We only need to fill in the address because the other members
5414  * of the RX descriptor are invariant, see tg3_init_rings.
5415  *
5416  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5417  * posting buffers we only dirty the first cache line of the RX
5418  * descriptor (containing the address).  Whereas for the RX status
5419  * buffers the cpu only reads the last cacheline of the RX descriptor
5420  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5421  */
5422 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5423                             u32 opaque_key, u32 dest_idx_unmasked)
5424 {
5425         struct tg3_rx_buffer_desc *desc;
5426         struct ring_info *map;
5427         struct sk_buff *skb;
5428         dma_addr_t mapping;
5429         int skb_size, dest_idx;
5430
5431         switch (opaque_key) {
5432         case RXD_OPAQUE_RING_STD:
5433                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5434                 desc = &tpr->rx_std[dest_idx];
5435                 map = &tpr->rx_std_buffers[dest_idx];
5436                 skb_size = tp->rx_pkt_map_sz;
5437                 break;
5438
5439         case RXD_OPAQUE_RING_JUMBO:
5440                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5441                 desc = &tpr->rx_jmb[dest_idx].std;
5442                 map = &tpr->rx_jmb_buffers[dest_idx];
5443                 skb_size = TG3_RX_JMB_MAP_SZ;
5444                 break;
5445
5446         default:
5447                 return -EINVAL;
5448         }
5449
5450         /* Do not overwrite any of the map or rp information
5451          * until we are sure we can commit to a new buffer.
5452          *
5453          * Callers depend upon this behavior and assume that
5454          * we leave everything unchanged if we fail.
5455          */
5456         skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
5457         if (skb == NULL)
5458                 return -ENOMEM;
5459
5460         skb_reserve(skb, TG3_RX_OFFSET(tp));
5461
5462         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
5463                                  PCI_DMA_FROMDEVICE);
5464         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5465                 dev_kfree_skb(skb);
5466                 return -EIO;
5467         }
5468
5469         map->skb = skb;
5470         dma_unmap_addr_set(map, mapping, mapping);
5471
5472         desc->addr_hi = ((u64)mapping >> 32);
5473         desc->addr_lo = ((u64)mapping & 0xffffffff);
5474
5475         return skb_size;
5476 }
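/* Failure contract illustrated: on -ENOMEM or -EIO the descriptor and
 * ring_info above are left untouched, so a caller such as tg3_rx()
 * can simply recycle the old buffer for that slot (see
 * tg3_recycle_rx() below) instead of losing it.
 */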
5477
5478 /* We only need to move over in the address because the other
5479  * members of the RX descriptor are invariant.  See notes above
5480  * tg3_alloc_rx_skb for full details.
5481  */
5482 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5483                            struct tg3_rx_prodring_set *dpr,
5484                            u32 opaque_key, int src_idx,
5485                            u32 dest_idx_unmasked)
5486 {
5487         struct tg3 *tp = tnapi->tp;
5488         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5489         struct ring_info *src_map, *dest_map;
5490         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5491         int dest_idx;
5492
5493         switch (opaque_key) {
5494         case RXD_OPAQUE_RING_STD:
5495                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5496                 dest_desc = &dpr->rx_std[dest_idx];
5497                 dest_map = &dpr->rx_std_buffers[dest_idx];
5498                 src_desc = &spr->rx_std[src_idx];
5499                 src_map = &spr->rx_std_buffers[src_idx];
5500                 break;
5501
5502         case RXD_OPAQUE_RING_JUMBO:
5503                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5504                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5505                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5506                 src_desc = &spr->rx_jmb[src_idx].std;
5507                 src_map = &spr->rx_jmb_buffers[src_idx];
5508                 break;
5509
5510         default:
5511                 return;
5512         }
5513
5514         dest_map->skb = src_map->skb;
5515         dma_unmap_addr_set(dest_map, mapping,
5516                            dma_unmap_addr(src_map, mapping));
5517         dest_desc->addr_hi = src_desc->addr_hi;
5518         dest_desc->addr_lo = src_desc->addr_lo;
5519
5520         /* Ensure that the update to the skb happens after the physical
5521          * addresses have been transferred to the new BD location.
5522          */
5523         smp_wmb();
5524
5525         src_map->skb = NULL;
5526 }
5527
5528 /* The RX ring scheme is composed of multiple rings which post fresh
5529  * buffers to the chip, and one special ring the chip uses to report
5530  * status back to the host.
5531  *
5532  * The special ring reports the status of received packets to the
5533  * host.  The chip does not write into the original descriptor the
5534  * RX buffer was obtained from.  The chip simply takes the original
5535  * descriptor as provided by the host, updates the status and length
5536  * field, then writes this into the next status ring entry.
5537  *
5538  * Each ring the host uses to post buffers to the chip is described
5539  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5540  * it is first placed into the on-chip ram.  When the packet's length
5541  * is known, it walks down the TG3_BDINFO entries to select the ring.
5542  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5543  * which is within the range of the new packet's length is chosen.
5544  *
5545  * The "separate ring for rx status" scheme may sound odd, but it makes
5546  * sense from a cache coherency perspective.  If only the host writes
5547  * to the buffer post rings, and only the chip writes to the rx status
5548  * rings, then cache lines never move beyond shared-modified state.
5549  * If both the host and chip were to write into the same ring, cache line
5550  * eviction could occur since both entities want it in an exclusive state.
5551  */
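/* Concrete decode of the opaque cookie described above (illustrative
 * value): a status entry with desc->opaque = RXD_OPAQUE_RING_STD | 42
 * identifies slot 42 of the standard producer ring, i.e. the buffer in
 * tp->napi[0].prodring.rx_std_buffers[42].
 */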
5552 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5553 {
5554         struct tg3 *tp = tnapi->tp;
5555         u32 work_mask, rx_std_posted = 0;
5556         u32 std_prod_idx, jmb_prod_idx;
5557         u32 sw_idx = tnapi->rx_rcb_ptr;
5558         u16 hw_idx;
5559         int received;
5560         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5561
5562         hw_idx = *(tnapi->rx_rcb_prod_idx);
5563         /*
5564          * We need to order the read of hw_idx and the read of
5565          * the opaque cookie.
5566          */
5567         rmb();
5568         work_mask = 0;
5569         received = 0;
5570         std_prod_idx = tpr->rx_std_prod_idx;
5571         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5572         while (sw_idx != hw_idx && budget > 0) {
5573                 struct ring_info *ri;
5574                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5575                 unsigned int len;
5576                 struct sk_buff *skb;
5577                 dma_addr_t dma_addr;
5578                 u32 opaque_key, desc_idx, *post_ptr;
5579
5580                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5581                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5582                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5583                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5584                         dma_addr = dma_unmap_addr(ri, mapping);
5585                         skb = ri->skb;
5586                         post_ptr = &std_prod_idx;
5587                         rx_std_posted++;
5588                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5589                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5590                         dma_addr = dma_unmap_addr(ri, mapping);
5591                         skb = ri->skb;
5592                         post_ptr = &jmb_prod_idx;
5593                 } else
5594                         goto next_pkt_nopost;
5595
5596                 work_mask |= opaque_key;
5597
5598                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5599                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5600                 drop_it:
5601                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5602                                        desc_idx, *post_ptr);
5603                 drop_it_no_recycle:
5604                         /* The card keeps track of other statistics. */
5605                         tp->rx_dropped++;
5606                         goto next_pkt;
5607                 }
5608
5609                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5610                       ETH_FCS_LEN;
5611
5612                 if (len > TG3_RX_COPY_THRESH(tp)) {
5613                         int skb_size;
5614
5615                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5616                                                     *post_ptr);
5617                         if (skb_size < 0)
5618                                 goto drop_it;
5619
5620                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5621                                          PCI_DMA_FROMDEVICE);
5622
5623                         /* Ensure that the update to the skb happens
5624                          * after the usage of the old DMA mapping.
5625                          */
5626                         smp_wmb();
5627
5628                         ri->skb = NULL;
5629
5630                         skb_put(skb, len);
5631                 } else {
5632                         struct sk_buff *copy_skb;
5633
5634                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5635                                        desc_idx, *post_ptr);
5636
5637                         copy_skb = netdev_alloc_skb(tp->dev, len +
5638                                                     TG3_RAW_IP_ALIGN);
5639                         if (copy_skb == NULL)
5640                                 goto drop_it_no_recycle;
5641
5642                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5643                         skb_put(copy_skb, len);
5644                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
                                                         PCI_DMA_FROMDEVICE);
5645                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5646                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len,
                                                            PCI_DMA_FROMDEVICE);
5647
5648                         /* We'll reuse the original ring buffer. */
5649                         skb = copy_skb;
5650                 }
5651
5652                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5653                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5654                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5655                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5656                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5657                 else
5658                         skb_checksum_none_assert(skb);
5659
5660                 skb->protocol = eth_type_trans(skb, tp->dev);
5661
5662                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5663                     skb->protocol != htons(ETH_P_8021Q)) {
5664                         dev_kfree_skb(skb);
5665                         goto drop_it_no_recycle;
5666                 }
5667
5668                 if (desc->type_flags & RXD_FLAG_VLAN &&
5669                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5670                         __vlan_hwaccel_put_tag(skb,
5671                                                desc->err_vlan & RXD_VLAN_MASK);
5672
5673                 napi_gro_receive(&tnapi->napi, skb);
5674
5675                 received++;
5676                 budget--;
5677
5678 next_pkt:
5679                 (*post_ptr)++;
5680
5681                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5682                         tpr->rx_std_prod_idx = std_prod_idx &
5683                                                tp->rx_std_ring_mask;
5684                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5685                                      tpr->rx_std_prod_idx);
5686                         work_mask &= ~RXD_OPAQUE_RING_STD;
5687                         rx_std_posted = 0;
5688                 }
5689 next_pkt_nopost:
5690                 sw_idx++;
5691                 sw_idx &= tp->rx_ret_ring_mask;
5692
5693                 /* Refresh hw_idx to see if there is new work */
5694                 if (sw_idx == hw_idx) {
5695                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5696                         rmb();
5697                 }
5698         }
5699
5700         /* ACK the status ring. */
5701         tnapi->rx_rcb_ptr = sw_idx;
5702         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5703
5704         /* Refill RX ring(s). */
5705         if (!tg3_flag(tp, ENABLE_RSS)) {
5706                 if (work_mask & RXD_OPAQUE_RING_STD) {
5707                         tpr->rx_std_prod_idx = std_prod_idx &
5708                                                tp->rx_std_ring_mask;
5709                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5710                                      tpr->rx_std_prod_idx);
5711                 }
5712                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5713                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5714                                                tp->rx_jmb_ring_mask;
5715                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5716                                      tpr->rx_jmb_prod_idx);
5717                 }
5718                 mmiowb();
5719         } else if (work_mask) {
5720                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5721                  * updated before the producer indices can be updated.
5722                  */
5723                 smp_wmb();
5724
5725                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5726                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5727
5728                 if (tnapi != &tp->napi[1])
5729                         napi_schedule(&tp->napi[1].napi);
5730         }
5731
5732         return received;
5733 }
5734
5735 static void tg3_poll_link(struct tg3 *tp)
5736 {
5737         /* handle link change and other phy events */
5738         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5739                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5740
5741                 if (sblk->status & SD_STATUS_LINK_CHG) {
5742                         sblk->status = SD_STATUS_UPDATED |
5743                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5744                         spin_lock(&tp->lock);
5745                         if (tg3_flag(tp, USE_PHYLIB)) {
5746                                 tw32_f(MAC_STATUS,
5747                                      (MAC_STATUS_SYNC_CHANGED |
5748                                       MAC_STATUS_CFG_CHANGED |
5749                                       MAC_STATUS_MI_COMPLETION |
5750                                       MAC_STATUS_LNKSTATE_CHANGED));
5751                                 udelay(40);
5752                         } else
5753                                 tg3_setup_phy(tp, 0);
5754                         spin_unlock(&tp->lock);
5755                 }
5756         }
5757 }
5758
5759 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5760                                 struct tg3_rx_prodring_set *dpr,
5761                                 struct tg3_rx_prodring_set *spr)
5762 {
5763         u32 si, di, cpycnt, src_prod_idx;
5764         int i, err = 0;
5765
5766         while (1) {
5767                 src_prod_idx = spr->rx_std_prod_idx;
5768
5769                 /* Make sure updates to the rx_std_buffers[] entries and the
5770                  * standard producer index are seen in the correct order.
5771                  */
5772                 smp_rmb();
5773
5774                 if (spr->rx_std_cons_idx == src_prod_idx)
5775                         break;
5776
5777                 if (spr->rx_std_cons_idx < src_prod_idx)
5778                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5779                 else
5780                         cpycnt = tp->rx_std_ring_mask + 1 -
5781                                  spr->rx_std_cons_idx;
5782
5783                 cpycnt = min(cpycnt,
5784                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5785
5786                 si = spr->rx_std_cons_idx;
5787                 di = dpr->rx_std_prod_idx;
5788
5789                 for (i = di; i < di + cpycnt; i++) {
5790                         if (dpr->rx_std_buffers[i].skb) {
5791                                 cpycnt = i - di;
5792                                 err = -ENOSPC;
5793                                 break;
5794                         }
5795                 }
5796
5797                 if (!cpycnt)
5798                         break;
5799
5800                 /* Ensure that updates to the rx_std_buffers ring and the
5801                  * shadowed hardware producer ring from tg3_recycle_skb() are
5802                  * ordered correctly WRT the skb check above.
5803                  */
5804                 smp_rmb();
5805
5806                 memcpy(&dpr->rx_std_buffers[di],
5807                        &spr->rx_std_buffers[si],
5808                        cpycnt * sizeof(struct ring_info));
5809
5810                 for (i = 0; i < cpycnt; i++, di++, si++) {
5811                         struct tg3_rx_buffer_desc *sbd, *dbd;
5812                         sbd = &spr->rx_std[si];
5813                         dbd = &dpr->rx_std[di];
5814                         dbd->addr_hi = sbd->addr_hi;
5815                         dbd->addr_lo = sbd->addr_lo;
5816                 }
5817
5818                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5819                                        tp->rx_std_ring_mask;
5820                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5821                                        tp->rx_std_ring_mask;
5822         }
5823
5824         while (1) {
5825                 src_prod_idx = spr->rx_jmb_prod_idx;
5826
5827                 /* Make sure updates to the rx_jmb_buffers[] entries and
5828                  * the jumbo producer index are seen in the correct order.
5829                  */
5830                 smp_rmb();
5831
5832                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5833                         break;
5834
5835                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5836                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5837                 else
5838                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5839                                  spr->rx_jmb_cons_idx;
5840
5841                 cpycnt = min(cpycnt,
5842                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5843
5844                 si = spr->rx_jmb_cons_idx;
5845                 di = dpr->rx_jmb_prod_idx;
5846
5847                 for (i = di; i < di + cpycnt; i++) {
5848                         if (dpr->rx_jmb_buffers[i].skb) {
5849                                 cpycnt = i - di;
5850                                 err = -ENOSPC;
5851                                 break;
5852                         }
5853                 }
5854
5855                 if (!cpycnt)
5856                         break;
5857
5858                 /* Ensure that updates to the rx_jmb_buffers ring and the
5859                  * shadowed hardware producer ring from tg3_recycle_skb() are
5860                  * ordered correctly WRT the skb check above.
5861                  */
5862                 smp_rmb();
5863
5864                 memcpy(&dpr->rx_jmb_buffers[di],
5865                        &spr->rx_jmb_buffers[si],
5866                        cpycnt * sizeof(struct ring_info));
5867
5868                 for (i = 0; i < cpycnt; i++, di++, si++) {
5869                         struct tg3_rx_buffer_desc *sbd, *dbd;
5870                         sbd = &spr->rx_jmb[si].std;
5871                         dbd = &dpr->rx_jmb[di].std;
5872                         dbd->addr_hi = sbd->addr_hi;
5873                         dbd->addr_lo = sbd->addr_lo;
5874                 }
5875
5876                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5877                                        tp->rx_jmb_ring_mask;
5878                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5879                                        tp->rx_jmb_ring_mask;
5880         }
5881
5882         return err;
5883 }
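
/* Worked example of the cpycnt arithmetic above, assuming a
 * hypothetical ring mask of 0x1ff (512 entries): with a source
 * consumer index of 0x1f0 and a producer index of 0x010, the producer
 * has wrapped, so the first pass copies mask + 1 - 0x1f0 = 0x10
 * entries up to the end of the ring, and the next pass copies the
 * remaining 0x10 entries starting at index 0.  The min() against the
 * destination ring bounds each copy to the end of that ring, and the
 * skb scan stops the copy with -ENOSPC if a destination slot is
 * still occupied.
 */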
5884
5885 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5886 {
5887         struct tg3 *tp = tnapi->tp;
5888
5889         /* run TX completion thread */
5890         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5891                 tg3_tx(tnapi);
5892                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5893                         return work_done;
5894         }
5895
5896         /* run RX thread, within the bounds set by NAPI.
5897          * All RX "locking" is done by ensuring outside
5898          * code synchronizes with tg3->napi.poll()
5899          */
5900         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5901                 work_done += tg3_rx(tnapi, budget - work_done);
5902
5903         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5904                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5905                 int i, err = 0;
5906                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5907                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5908
5909                 for (i = 1; i < tp->irq_cnt; i++)
5910                         err |= tg3_rx_prodring_xfer(tp, dpr,
5911                                                     &tp->napi[i].prodring);
5912
5913                 wmb();
5914
5915                 if (std_prod_idx != dpr->rx_std_prod_idx)
5916                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5917                                      dpr->rx_std_prod_idx);
5918
5919                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5920                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5921                                      dpr->rx_jmb_prod_idx);
5922
5923                 mmiowb();
5924
5925                 if (err)
5926                         tw32_f(HOSTCC_MODE, tp->coal_now);
5927         }
5928
5929         return work_done;
5930 }
5931
5932 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5933 {
5934         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5935         struct tg3 *tp = tnapi->tp;
5936         int work_done = 0;
5937         struct tg3_hw_status *sblk = tnapi->hw_status;
5938
5939         while (1) {
5940                 work_done = tg3_poll_work(tnapi, work_done, budget);
5941
5942                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5943                         goto tx_recovery;
5944
5945                 if (unlikely(work_done >= budget))
5946                         break;
5947
5948                 /* tp->last_tag is used in tg3_int_reenable() below
5949                  * to tell the hw how much work has been processed,
5950                  * so we must read it before checking for more work.
5951                  */
5952                 tnapi->last_tag = sblk->status_tag;
5953                 tnapi->last_irq_tag = tnapi->last_tag;
5954                 rmb();
5955
5956                 /* check for RX/TX work to do */
5957                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5958                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5959                         napi_complete(napi);
5960                         /* Reenable interrupts. */
5961                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5962                         mmiowb();
5963                         break;
5964                 }
5965         }
5966
5967         return work_done;
5968
5969 tx_recovery:
5970         /* work_done is guaranteed to be less than budget. */
5971         napi_complete(napi);
5972         schedule_work(&tp->reset_task);
5973         return work_done;
5974 }
5975
5976 static void tg3_process_error(struct tg3 *tp)
5977 {
5978         u32 val;
5979         bool real_error = false;
5980
5981         if (tg3_flag(tp, ERROR_PROCESSED))
5982                 return;
5983
5984         /* Check Flow Attention register */
5985         val = tr32(HOSTCC_FLOW_ATTN);
5986         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5987                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5988                 real_error = true;
5989         }
5990
5991         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5992                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5993                 real_error = true;
5994         }
5995
5996         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5997                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5998                 real_error = true;
5999         }
6000
6001         if (!real_error)
6002                 return;
6003
6004         tg3_dump_state(tp);
6005
6006         tg3_flag_set(tp, ERROR_PROCESSED);
6007         schedule_work(&tp->reset_task);
6008 }
6009
6010 static int tg3_poll(struct napi_struct *napi, int budget)
6011 {
6012         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6013         struct tg3 *tp = tnapi->tp;
6014         int work_done = 0;
6015         struct tg3_hw_status *sblk = tnapi->hw_status;
6016
6017         while (1) {
6018                 if (sblk->status & SD_STATUS_ERROR)
6019                         tg3_process_error(tp);
6020
6021                 tg3_poll_link(tp);
6022
6023                 work_done = tg3_poll_work(tnapi, work_done, budget);
6024
6025                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6026                         goto tx_recovery;
6027
6028                 if (unlikely(work_done >= budget))
6029                         break;
6030
6031                 if (tg3_flag(tp, TAGGED_STATUS)) {
6032                         /* tp->last_tag is used in tg3_int_reenable() below
6033                          * to tell the hw how much work has been processed,
6034                          * so we must read it before checking for more work.
6035                          */
6036                         tnapi->last_tag = sblk->status_tag;
6037                         tnapi->last_irq_tag = tnapi->last_tag;
6038                         rmb();
6039                 } else
6040                         sblk->status &= ~SD_STATUS_UPDATED;
6041
6042                 if (likely(!tg3_has_work(tnapi))) {
6043                         napi_complete(napi);
6044                         tg3_int_reenable(tnapi);
6045                         break;
6046                 }
6047         }
6048
6049         return work_done;
6050
6051 tx_recovery:
6052         /* work_done is guaranteed to be less than budget. */
6053         napi_complete(napi);
6054         schedule_work(&tp->reset_task);
6055         return work_done;
6056 }
6057
6058 static void tg3_napi_disable(struct tg3 *tp)
6059 {
6060         int i;
6061
6062         for (i = tp->irq_cnt - 1; i >= 0; i--)
6063                 napi_disable(&tp->napi[i].napi);
6064 }
6065
6066 static void tg3_napi_enable(struct tg3 *tp)
6067 {
6068         int i;
6069
6070         for (i = 0; i < tp->irq_cnt; i++)
6071                 napi_enable(&tp->napi[i].napi);
6072 }
6073
6074 static void tg3_napi_init(struct tg3 *tp)
6075 {
6076         int i;
6077
6078         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6079         for (i = 1; i < tp->irq_cnt; i++)
6080                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6081 }
6082
6083 static void tg3_napi_fini(struct tg3 *tp)
6084 {
6085         int i;
6086
6087         for (i = 0; i < tp->irq_cnt; i++)
6088                 netif_napi_del(&tp->napi[i].napi);
6089 }
6090
6091 static inline void tg3_netif_stop(struct tg3 *tp)
6092 {
6093         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6094         tg3_napi_disable(tp);
6095         netif_tx_disable(tp->dev);
6096 }
6097
6098 static inline void tg3_netif_start(struct tg3 *tp)
6099 {
6100         /* NOTE: unconditional netif_tx_wake_all_queues is only
6101          * appropriate so long as all callers are assured to
6102          * have free tx slots (such as after tg3_init_hw)
6103          */
6104         netif_tx_wake_all_queues(tp->dev);
6105
6106         tg3_napi_enable(tp);
6107         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6108         tg3_enable_ints(tp);
6109 }
6110
6111 static void tg3_irq_quiesce(struct tg3 *tp)
6112 {
6113         int i;
6114
6115         BUG_ON(tp->irq_sync);
6116
6117         tp->irq_sync = 1;
6118         smp_mb();
6119
6120         for (i = 0; i < tp->irq_cnt; i++)
6121                 synchronize_irq(tp->napi[i].irq_vec);
6122 }
6123
6124 /* Fully shut down all tg3 driver activity elsewhere in the system.
6125  * If irq_sync is non-zero, the IRQ handlers must be synchronized with
6126  * this call as well.  Most of the time this is not necessary, except
6127  * when shutting down the device.
6128  */
6129 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6130 {
6131         spin_lock_bh(&tp->lock);
6132         if (irq_sync)
6133                 tg3_irq_quiesce(tp);
6134 }
6135
6136 static inline void tg3_full_unlock(struct tg3 *tp)
6137 {
6138         spin_unlock_bh(&tp->lock);
6139 }
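
/* Typical usage of the helpers above, as in tg3_change_mtu() and
 * tg3_reset_task() below; irq_sync = 1 is passed when the hardware is
 * about to be halted:
 *
 *      tg3_netif_stop(tp);
 *      tg3_full_lock(tp, 1);   (also waits for in-flight handlers)
 *      ... halt and reprogram the hardware ...
 *      tg3_full_unlock(tp);
 */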
6140
6141 /* One-shot MSI handler - the chip automatically disables the
6142  * interrupt after sending the MSI, so the driver doesn't have to.
6143  */
6144 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6145 {
6146         struct tg3_napi *tnapi = dev_id;
6147         struct tg3 *tp = tnapi->tp;
6148
6149         prefetch(tnapi->hw_status);
6150         if (tnapi->rx_rcb)
6151                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6152
6153         if (likely(!tg3_irq_sync(tp)))
6154                 napi_schedule(&tnapi->napi);
6155
6156         return IRQ_HANDLED;
6157 }
6158
6159 /* MSI ISR - No need to check for interrupt sharing and no need to
6160  * flush status block and interrupt mailbox. PCI ordering rules
6161  * guarantee that MSI will arrive after the status block.
6162  */
6163 static irqreturn_t tg3_msi(int irq, void *dev_id)
6164 {
6165         struct tg3_napi *tnapi = dev_id;
6166         struct tg3 *tp = tnapi->tp;
6167
6168         prefetch(tnapi->hw_status);
6169         if (tnapi->rx_rcb)
6170                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6171         /*
6172          * Writing any value to intr-mbox-0 clears PCI INTA# and
6173          * chip-internal interrupt pending events.
6174          * Writing non-zero to intr-mbox-0 additionally tells the
6175          * NIC to stop sending us irqs, engaging "in-intr-handler"
6176          * event coalescing.
6177          */
6178         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6179         if (likely(!tg3_irq_sync(tp)))
6180                 napi_schedule(&tnapi->napi);
6181
6182         return IRQ_RETVAL(1);
6183 }
6184
6185 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6186 {
6187         struct tg3_napi *tnapi = dev_id;
6188         struct tg3 *tp = tnapi->tp;
6189         struct tg3_hw_status *sblk = tnapi->hw_status;
6190         unsigned int handled = 1;
6191
6192         /* In INTx mode, the interrupt can arrive at the CPU before the
6193          * status block posted prior to the interrupt is visible.
6194          * Reading the PCI State register will confirm whether the
6195          * interrupt is ours and will flush the status block.
6196          */
6197         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6198                 if (tg3_flag(tp, CHIP_RESETTING) ||
6199                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6200                         handled = 0;
6201                         goto out;
6202                 }
6203         }
6204
6205         /*
6206          * Writing any value to intr-mbox-0 clears PCI INTA# and
6207          * chip-internal interrupt pending events.
6208          * Writing non-zero to intr-mbox-0 additionally tells the
6209          * NIC to stop sending us irqs, engaging "in-intr-handler"
6210          * event coalescing.
6211          *
6212          * Flush the mailbox to de-assert the IRQ immediately to prevent
6213          * spurious interrupts.  The flush impacts performance but
6214          * excessive spurious interrupts can be worse in some cases.
6215          */
6216         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6217         if (tg3_irq_sync(tp))
6218                 goto out;
6219         sblk->status &= ~SD_STATUS_UPDATED;
6220         if (likely(tg3_has_work(tnapi))) {
6221                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6222                 napi_schedule(&tnapi->napi);
6223         } else {
6224                 /* No work, shared interrupt perhaps?  Re-enable
6225                  * interrupts, and flush that PCI write.
6226                  */
6227                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6228                                0x00000000);
6229         }
6230 out:
6231         return IRQ_RETVAL(handled);
6232 }
6233
6234 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6235 {
6236         struct tg3_napi *tnapi = dev_id;
6237         struct tg3 *tp = tnapi->tp;
6238         struct tg3_hw_status *sblk = tnapi->hw_status;
6239         unsigned int handled = 1;
6240
6241         /* In INTx mode, the interrupt can arrive at the CPU before the
6242          * status block posted prior to the interrupt is visible.
6243          * Reading the PCI State register will confirm whether the
6244          * interrupt is ours and will flush the status block.
6245          */
6246         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6247                 if (tg3_flag(tp, CHIP_RESETTING) ||
6248                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6249                         handled = 0;
6250                         goto out;
6251                 }
6252         }
6253
6254         /*
6255          * Writing any value to intr-mbox-0 clears PCI INTA# and
6256          * chip-internal interrupt pending events.
6257          * Writing non-zero to intr-mbox-0 additionally tells the
6258          * NIC to stop sending us irqs, engaging "in-intr-handler"
6259          * event coalescing.
6260          *
6261          * Flush the mailbox to de-assert the IRQ immediately to prevent
6262          * spurious interrupts.  The flush impacts performance but
6263          * excessive spurious interrupts can be worse in some cases.
6264          */
6265         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6266
6267         /*
6268          * In a shared interrupt configuration, sometimes other devices'
6269          * interrupts will scream.  We record the current status tag here
6270          * so that the above check can report that the screaming interrupts
6271          * are unhandled.  Eventually they will be silenced.
6272          */
6273         tnapi->last_irq_tag = sblk->status_tag;
6274
6275         if (tg3_irq_sync(tp))
6276                 goto out;
6277
6278         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6279
6280         napi_schedule(&tnapi->napi);
6281
6282 out:
6283         return IRQ_RETVAL(handled);
6284 }
6285
6286 /* ISR for interrupt test */
6287 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6288 {
6289         struct tg3_napi *tnapi = dev_id;
6290         struct tg3 *tp = tnapi->tp;
6291         struct tg3_hw_status *sblk = tnapi->hw_status;
6292
6293         if ((sblk->status & SD_STATUS_UPDATED) ||
6294             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6295                 tg3_disable_ints(tp);
6296                 return IRQ_RETVAL(1);
6297         }
6298         return IRQ_RETVAL(0);
6299 }
6300
6301 static int tg3_init_hw(struct tg3 *, int);
6302 static int tg3_halt(struct tg3 *, int, int);
6303
6304 /* Restart hardware after configuration changes, self-test, etc.
6305  * Invoked with tp->lock held.
6306  */
6307 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6308         __releases(tp->lock)
6309         __acquires(tp->lock)
6310 {
6311         int err;
6312
6313         err = tg3_init_hw(tp, reset_phy);
6314         if (err) {
6315                 netdev_err(tp->dev,
6316                            "Failed to re-initialize device, aborting\n");
6317                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6318                 tg3_full_unlock(tp);
6319                 del_timer_sync(&tp->timer);
6320                 tp->irq_sync = 0;
6321                 tg3_napi_enable(tp);
6322                 dev_close(tp->dev);
6323                 tg3_full_lock(tp, 0);
6324         }
6325         return err;
6326 }
6327
6328 #ifdef CONFIG_NET_POLL_CONTROLLER
6329 static void tg3_poll_controller(struct net_device *dev)
6330 {
6331         int i;
6332         struct tg3 *tp = netdev_priv(dev);
6333
6334         for (i = 0; i < tp->irq_cnt; i++)
6335                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6336 }
6337 #endif
6338
6339 static void tg3_reset_task(struct work_struct *work)
6340 {
6341         struct tg3 *tp = container_of(work, struct tg3, reset_task);
6342         int err;
6343         unsigned int restart_timer;
6344
6345         tg3_full_lock(tp, 0);
6346
6347         if (!netif_running(tp->dev)) {
6348                 tg3_full_unlock(tp);
6349                 return;
6350         }
6351
6352         tg3_full_unlock(tp);
6353
6354         tg3_phy_stop(tp);
6355
6356         tg3_netif_stop(tp);
6357
6358         tg3_full_lock(tp, 1);
6359
6360         restart_timer = tg3_flag(tp, RESTART_TIMER);
6361         tg3_flag_clear(tp, RESTART_TIMER);
6362
6363         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6364                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6365                 tp->write32_rx_mbox = tg3_write_flush_reg32;
6366                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
6367                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6368         }
6369
6370         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6371         err = tg3_init_hw(tp, 1);
6372         if (err)
6373                 goto out;
6374
6375         tg3_netif_start(tp);
6376
6377         if (restart_timer)
6378                 mod_timer(&tp->timer, jiffies + 1);
6379
6380 out:
6381         tg3_full_unlock(tp);
6382
6383         if (!err)
6384                 tg3_phy_start(tp);
6385 }
6386
6387 static void tg3_tx_timeout(struct net_device *dev)
6388 {
6389         struct tg3 *tp = netdev_priv(dev);
6390
6391         if (netif_msg_tx_err(tp)) {
6392                 netdev_err(dev, "transmit timed out, resetting\n");
6393                 tg3_dump_state(tp);
6394         }
6395
6396         schedule_work(&tp->reset_task);
6397 }
6398
6399 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
6400 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6401 {
6402         u32 base = (u32) mapping & 0xffffffff;
6403
6404         return (base > 0xffffdcc0) && (base + len + 8 < base);
6405 }
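
/* Worked example: with mapping = 0xffffff00 and len = 0x1000, base is
 * 0xffffff00 (> 0xffffdcc0) and base + len + 8 wraps to 0x00000f08,
 * which is less than base, so the buffer straddles a 4GB boundary and
 * the workaround path must be taken.  The 0xffffdcc0 cutoff is
 * presumably a fast-path filter: only a mapping within roughly 9KB of
 * a 4GB boundary can wrap for this driver's maximum frame sizes.
 */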
6406
6407 /* Test for DMA addresses > 40-bit */
6408 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6409                                           int len)
6410 {
6411 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6412         if (tg3_flag(tp, 40BIT_DMA_BUG))
6413                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6414         return 0;
6415 #else
6416         return 0;
6417 #endif
6418 }
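
/* Example, for configurations where the test compiles in (see the
 * #if above): a mapping of 0xfffffff000 with len = 0x2000 ends at
 * 0x10000001000, which exceeds DMA_BIT_MASK(40) (0xffffffffff), so
 * chips with the 40BIT_DMA_BUG flag must take the bounce path.
 */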
6419
6420 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6421                                  dma_addr_t mapping, u32 len, u32 flags,
6422                                  u32 mss, u32 vlan)
6423 {
6424         txbd->addr_hi = ((u64) mapping >> 32);
6425         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6426         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6427         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6428 }
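
/* Illustration of the packing above with made-up values: mapping =
 * 0x123456000, len = 1514, flags = TXD_FLAG_END, mss = 0, vlan = 0
 * yields addr_hi = 0x00000001, addr_lo = 0x23456000 and len_flags =
 * (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END.  The 0x0000ffff mask keeps
 * the flags confined to the low 16 bits, away from the length field.
 */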
6429
6430 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6431                             dma_addr_t map, u32 len, u32 flags,
6432                             u32 mss, u32 vlan)
6433 {
6434         struct tg3 *tp = tnapi->tp;
6435         bool hwbug = false;
6436
6437         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6438                 hwbug = true;
6439
6440         if (tg3_4g_overflow_test(map, len))
6441                 hwbug = true;
6442
6443         if (tg3_40bit_overflow_test(tp, map, len))
6444                 hwbug = true;
6445
6446         if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6447                 u32 prvidx = *entry;
6448                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6449                 while (len > TG3_TX_BD_DMA_MAX && *budget) {
6450                         u32 frag_len = TG3_TX_BD_DMA_MAX;
6451                         len -= TG3_TX_BD_DMA_MAX;
6452
6453                         /* Avoid the 8-byte DMA problem */
6454                         if (len <= 8) {
6455                                 len += TG3_TX_BD_DMA_MAX / 2;
6456                                 frag_len = TG3_TX_BD_DMA_MAX / 2;
6457                         }
6458
6459                         tnapi->tx_buffers[*entry].fragmented = true;
6460
6461                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6462                                       frag_len, tmp_flag, mss, vlan);
6463                         *budget -= 1;
6464                         prvidx = *entry;
6465                         *entry = NEXT_TX(*entry);
6466
6467                         map += frag_len;
6468                 }
6469
6470                 if (len) {
6471                         if (*budget) {
6472                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6473                                               len, flags, mss, vlan);
6474                                 *budget -= 1;
6475                                 *entry = NEXT_TX(*entry);
6476                         } else {
6477                                 hwbug = true;
6478                                 tnapi->tx_buffers[prvidx].fragmented = false;
6479                         }
6480                 }
6481         } else {
6482                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6483                               len, flags, mss, vlan);
6484                 *entry = NEXT_TX(*entry);
6485         }
6486
6487         return hwbug;
6488 }
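
/* Worked example of the 4K_FIFO_LIMIT splitting above, assuming
 * TG3_TX_BD_DMA_MAX is 4096: a 10000-byte fragment is emitted as BDs
 * of 4096, 4096 and 1808 bytes.  When the remainder would be 8 bytes
 * or less (e.g. an 8196-byte fragment), the middle BD is shortened to
 * 2048 bytes so the tail BD (2052 bytes here) stays safely above the
 * 8-byte SHORT_DMA_BUG threshold.
 */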
6489
6490 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6491 {
6492         int i;
6493         struct sk_buff *skb;
6494         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6495
6496         skb = txb->skb;
6497         txb->skb = NULL;
6498
6499         pci_unmap_single(tnapi->tp->pdev,
6500                          dma_unmap_addr(txb, mapping),
6501                          skb_headlen(skb),
6502                          PCI_DMA_TODEVICE);
6503
6504         while (txb->fragmented) {
6505                 txb->fragmented = false;
6506                 entry = NEXT_TX(entry);
6507                 txb = &tnapi->tx_buffers[entry];
6508         }
6509
6510         for (i = 0; i <= last; i++) {
6511                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6512
6513                 entry = NEXT_TX(entry);
6514                 txb = &tnapi->tx_buffers[entry];
6515
6516                 pci_unmap_page(tnapi->tp->pdev,
6517                                dma_unmap_addr(txb, mapping),
6518                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6519
6520                 while (txb->fragmented) {
6521                         txb->fragmented = false;
6522                         entry = NEXT_TX(entry);
6523                         txb = &tnapi->tx_buffers[entry];
6524                 }
6525         }
6526 }
6527
6528 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6529 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6530                                        struct sk_buff **pskb,
6531                                        u32 *entry, u32 *budget,
6532                                        u32 base_flags, u32 mss, u32 vlan)
6533 {
6534         struct tg3 *tp = tnapi->tp;
6535         struct sk_buff *new_skb, *skb = *pskb;
6536         dma_addr_t new_addr = 0;
6537         int ret = 0;
6538
6539         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6540                 new_skb = skb_copy(skb, GFP_ATOMIC);
6541         else {
6542                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6543
6544                 new_skb = skb_copy_expand(skb,
6545                                           skb_headroom(skb) + more_headroom,
6546                                           skb_tailroom(skb), GFP_ATOMIC);
6547         }
6548
6549         if (!new_skb) {
6550                 ret = -1;
6551         } else {
6552                 /* New SKB is guaranteed to be linear. */
6553                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6554                                           PCI_DMA_TODEVICE);
6555                 /* Make sure the mapping succeeded */
6556                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6557                         dev_kfree_skb(new_skb);
6558                         ret = -1;
6559                 } else {
6560                         u32 save_entry = *entry;
6561
6562                         base_flags |= TXD_FLAG_END;
6563
6564                         tnapi->tx_buffers[*entry].skb = new_skb;
6565                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6566                                            mapping, new_addr);
6567
6568                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6569                                             new_skb->len, base_flags,
6570                                             mss, vlan)) {
6571                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6572                                 dev_kfree_skb(new_skb);
6573                                 ret = -1;
6574                         }
6575                 }
6576         }
6577
6578         dev_kfree_skb(skb);
6579         *pskb = new_skb;
6580         return ret;
6581 }
6582
6583 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6584
6585 /* Use GSO to work around a rare TSO bug that may be triggered when the
6586  * TSO header is greater than 80 bytes.
6587  */
6588 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6589 {
6590         struct sk_buff *segs, *nskb;
6591         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6592
6593         /* Estimate the number of fragments in the worst case */
6594         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6595                 netif_stop_queue(tp->dev);
6596
6597                 /* netif_tx_stop_queue() must be done before checking
6598                  * tx index in tg3_tx_avail() below, because in
6599                  * tg3_tx(), we update tx index before checking for
6600                  * netif_tx_queue_stopped().
6601                  */
6602                 smp_mb();
6603                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6604                         return NETDEV_TX_BUSY;
6605
6606                 netif_wake_queue(tp->dev);
6607         }
6608
6609         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6610         if (IS_ERR(segs))
6611                 goto tg3_tso_bug_end;
6612
6613         do {
6614                 nskb = segs;
6615                 segs = segs->next;
6616                 nskb->next = NULL;
6617                 tg3_start_xmit(nskb, tp->dev);
6618         } while (segs);
6619
6620 tg3_tso_bug_end:
6621         dev_kfree_skb(skb);
6622
6623         return NETDEV_TX_OK;
6624 }
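
/* The frag_cnt_est of gso_segs * 3 above is a worst-case guess,
 * presumably budgeting about three descriptors (linear header plus
 * data fragments) per resulting segment.  If even that many free ring
 * entries cannot be guaranteed, the queue is stopped and
 * NETDEV_TX_BUSY is returned so the stack will retry later.
 */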
6625
6626 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6627  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6628  */
6629 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6630 {
6631         struct tg3 *tp = netdev_priv(dev);
6632         u32 len, entry, base_flags, mss, vlan = 0;
6633         u32 budget;
6634         int i = -1, would_hit_hwbug;
6635         dma_addr_t mapping;
6636         struct tg3_napi *tnapi;
6637         struct netdev_queue *txq;
6638         unsigned int last;
6639
6640         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6641         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6642         if (tg3_flag(tp, ENABLE_TSS))
6643                 tnapi++;
6644
6645         budget = tg3_tx_avail(tnapi);
6646
6647         /* We are running in BH disabled context with netif_tx_lock
6648          * and TX reclaim runs via tp->napi.poll inside of a software
6649          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6650          * no IRQ context deadlocks to worry about either.  Rejoice!
6651          */
6652         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6653                 if (!netif_tx_queue_stopped(txq)) {
6654                         netif_tx_stop_queue(txq);
6655
6656                         /* This is a hard error, log it. */
6657                         netdev_err(dev,
6658                                    "BUG! Tx Ring full when queue awake!\n");
6659                 }
6660                 return NETDEV_TX_BUSY;
6661         }
6662
6663         entry = tnapi->tx_prod;
6664         base_flags = 0;
6665         if (skb->ip_summed == CHECKSUM_PARTIAL)
6666                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6667
6668         mss = skb_shinfo(skb)->gso_size;
6669         if (mss) {
6670                 struct iphdr *iph;
6671                 u32 tcp_opt_len, hdr_len;
6672
6673                 if (skb_header_cloned(skb) &&
6674                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6675                         goto drop;
6676
6677                 iph = ip_hdr(skb);
6678                 tcp_opt_len = tcp_optlen(skb);
6679
6680                 if (skb_is_gso_v6(skb)) {
6681                         hdr_len = skb_headlen(skb) - ETH_HLEN;
6682                 } else {
6683                         u32 ip_tcp_len;
6684
6685                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6686                         hdr_len = ip_tcp_len + tcp_opt_len;
6687
6688                         iph->check = 0;
6689                         iph->tot_len = htons(mss + hdr_len);
6690                 }
6691
6692                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6693                     tg3_flag(tp, TSO_BUG))
6694                         return tg3_tso_bug(tp, skb);
6695
6696                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6697                                TXD_FLAG_CPU_POST_DMA);
6698
6699                 if (tg3_flag(tp, HW_TSO_1) ||
6700                     tg3_flag(tp, HW_TSO_2) ||
6701                     tg3_flag(tp, HW_TSO_3)) {
6702                         tcp_hdr(skb)->check = 0;
6703                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6704                 } else
6705                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6706                                                                  iph->daddr, 0,
6707                                                                  IPPROTO_TCP,
6708                                                                  0);
6709
6710                 if (tg3_flag(tp, HW_TSO_3)) {
6711                         mss |= (hdr_len & 0xc) << 12;
6712                         if (hdr_len & 0x10)
6713                                 base_flags |= 0x00000010;
6714                         base_flags |= (hdr_len & 0x3e0) << 5;
6715                 } else if (tg3_flag(tp, HW_TSO_2))
6716                         mss |= hdr_len << 9;
6717                 else if (tg3_flag(tp, HW_TSO_1) ||
6718                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6719                         if (tcp_opt_len || iph->ihl > 5) {
6720                                 int tsflags;
6721
6722                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6723                                 mss |= (tsflags << 11);
6724                         }
6725                 } else {
6726                         if (tcp_opt_len || iph->ihl > 5) {
6727                                 int tsflags;
6728
6729                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6730                                 base_flags |= tsflags << 12;
6731                         }
6732                 }
6733         }
6734
6735         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6736             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6737                 base_flags |= TXD_FLAG_JMB_PKT;
6738
6739         if (vlan_tx_tag_present(skb)) {
6740                 base_flags |= TXD_FLAG_VLAN;
6741                 vlan = vlan_tx_tag_get(skb);
6742         }
6743
6744         len = skb_headlen(skb);
6745
6746         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6747         if (pci_dma_mapping_error(tp->pdev, mapping))
6748                 goto drop;
6749
6751         tnapi->tx_buffers[entry].skb = skb;
6752         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6753
6754         would_hit_hwbug = 0;
6755
6756         if (tg3_flag(tp, 5701_DMA_BUG))
6757                 would_hit_hwbug = 1;
6758
6759         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6760                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6761                             mss, vlan)) {
6762                 would_hit_hwbug = 1;
6763         /* Now loop through additional data fragments, and queue them. */
6764         } else if (skb_shinfo(skb)->nr_frags > 0) {
6765                 u32 tmp_mss = mss;
6766
6767                 if (!tg3_flag(tp, HW_TSO_1) &&
6768                     !tg3_flag(tp, HW_TSO_2) &&
6769                     !tg3_flag(tp, HW_TSO_3))
6770                         tmp_mss = 0;
6771
6772                 last = skb_shinfo(skb)->nr_frags - 1;
6773                 for (i = 0; i <= last; i++) {
6774                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6775
6776                         len = skb_frag_size(frag);
6777                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6778                                                    len, DMA_TO_DEVICE);
6779
6780                         tnapi->tx_buffers[entry].skb = NULL;
6781                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6782                                            mapping);
6783                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6784                                 goto dma_error;
6785
6786                         if (!budget ||
6787                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6788                                             len, base_flags |
6789                                             ((i == last) ? TXD_FLAG_END : 0),
6790                                             tmp_mss, vlan)) {
6791                                 would_hit_hwbug = 1;
6792                                 break;
6793                         }
6794                 }
6795         }
6796
6797         if (would_hit_hwbug) {
6798                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6799
6800                 /* If the workaround fails due to memory/mapping
6801                  * failure, silently drop this packet.
6802                  */
6803                 entry = tnapi->tx_prod;
6804                 budget = tg3_tx_avail(tnapi);
6805                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6806                                                 base_flags, mss, vlan))
6807                         goto drop_nofree;
6808         }
6809
6810         skb_tx_timestamp(skb);
6811
6812         /* Packets are ready, update Tx producer idx local and on card. */
6813         tw32_tx_mbox(tnapi->prodmbox, entry);
6814
6815         tnapi->tx_prod = entry;
6816         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6817                 netif_tx_stop_queue(txq);
6818
6819                 /* netif_tx_stop_queue() must be done before checking
6820                  * tx index in tg3_tx_avail() below, because in
6821                  * tg3_tx(), we update tx index before checking for
6822                  * netif_tx_queue_stopped().
6823                  */
6824                 smp_mb();
6825                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6826                         netif_tx_wake_queue(txq);
6827         }
6828
6829         mmiowb();
6830         return NETDEV_TX_OK;
6831
6832 dma_error:
6833         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6834         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6835 drop:
6836         dev_kfree_skb(skb);
6837 drop_nofree:
6838         tp->tx_dropped++;
6839         return NETDEV_TX_OK;
6840 }
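
/* A condensed sketch of the stop/wake protocol used above.  The
 * consumer column paraphrases the comments in tg3_start_xmit(); the
 * matching barrier in tg3_tx() (defined earlier in this file) is
 * assumed:
 *
 *      producer (tg3_start_xmit):      consumer (tg3_tx):
 *      stop queue when nearly full     advance the tx consumer index
 *      smp_mb();                       smp_mb();
 *      re-check space, wake if free    wake queue if stopped and space freed
 *
 * Without the barriers on both sides, each side could miss the
 * other's update and leave the queue stopped indefinitely.
 */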
6841
6842 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6843 {
6844         if (enable) {
6845                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6846                                   MAC_MODE_PORT_MODE_MASK);
6847
6848                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6849
6850                 if (!tg3_flag(tp, 5705_PLUS))
6851                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6852
6853                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6854                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6855                 else
6856                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6857         } else {
6858                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6859
6860                 if (tg3_flag(tp, 5705_PLUS) ||
6861                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6862                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6863                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6864         }
6865
6866         tw32(MAC_MODE, tp->mac_mode);
6867         udelay(40);
6868 }
6869
6870 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6871 {
6872         u32 val, bmcr, mac_mode, ptest = 0;
6873
6874         tg3_phy_toggle_apd(tp, false);
6875         tg3_phy_toggle_automdix(tp, 0);
6876
6877         if (extlpbk && tg3_phy_set_extloopbk(tp))
6878                 return -EIO;
6879
6880         bmcr = BMCR_FULLDPLX;
6881         switch (speed) {
6882         case SPEED_10:
6883                 break;
6884         case SPEED_100:
6885                 bmcr |= BMCR_SPEED100;
6886                 break;
6887         case SPEED_1000:
6888         default:
6889                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6890                         speed = SPEED_100;
6891                         bmcr |= BMCR_SPEED100;
6892                 } else {
6893                         speed = SPEED_1000;
6894                         bmcr |= BMCR_SPEED1000;
6895                 }
6896         }
6897
6898         if (extlpbk) {
6899                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6900                         tg3_readphy(tp, MII_CTRL1000, &val);
6901                         val |= CTL1000_AS_MASTER |
6902                                CTL1000_ENABLE_MASTER;
6903                         tg3_writephy(tp, MII_CTRL1000, val);
6904                 } else {
6905                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6906                                 MII_TG3_FET_PTEST_TRIM_2;
6907                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6908                 }
6909         } else
6910                 bmcr |= BMCR_LOOPBACK;
6911
6912         tg3_writephy(tp, MII_BMCR, bmcr);
6913
6914         /* The write needs to be flushed for the FETs */
6915         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6916                 tg3_readphy(tp, MII_BMCR, &bmcr);
6917
6918         udelay(40);
6919
6920         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6921             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6922                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6923                              MII_TG3_FET_PTEST_FRC_TX_LINK |
6924                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
6925
6926                 /* The write needs to be flushed for the AC131 */
6927                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6928         }
6929
6930         /* Reset to prevent losing 1st rx packet intermittently */
6931         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6932             tg3_flag(tp, 5780_CLASS)) {
6933                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6934                 udelay(10);
6935                 tw32_f(MAC_RX_MODE, tp->rx_mode);
6936         }
6937
6938         mac_mode = tp->mac_mode &
6939                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6940         if (speed == SPEED_1000)
6941                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6942         else
6943                 mac_mode |= MAC_MODE_PORT_MODE_MII;
6944
6945         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6946                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6947
6948                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6949                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
6950                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6951                         mac_mode |= MAC_MODE_LINK_POLARITY;
6952
6953                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6954                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6955         }
6956
6957         tw32(MAC_MODE, mac_mode);
6958         udelay(40);
6959
6960         return 0;
6961 }
6962
6963 static void tg3_set_loopback(struct net_device *dev, u32 features)
6964 {
6965         struct tg3 *tp = netdev_priv(dev);
6966
6967         if (features & NETIF_F_LOOPBACK) {
6968                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6969                         return;
6970
6971                 spin_lock_bh(&tp->lock);
6972                 tg3_mac_loopback(tp, true);
6973                 netif_carrier_on(tp->dev);
6974                 spin_unlock_bh(&tp->lock);
6975                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6976         } else {
6977                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6978                         return;
6979
6980                 spin_lock_bh(&tp->lock);
6981                 tg3_mac_loopback(tp, false);
6982                 /* Force link status check */
6983                 tg3_setup_phy(tp, 1);
6984                 spin_unlock_bh(&tp->lock);
6985                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6986         }
6987 }
6988
6989 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6990 {
6991         struct tg3 *tp = netdev_priv(dev);
6992
6993         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6994                 features &= ~NETIF_F_ALL_TSO;
6995
6996         return features;
6997 }
6998
6999 static int tg3_set_features(struct net_device *dev, u32 features)
7000 {
7001         u32 changed = dev->features ^ features;
7002
7003         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7004                 tg3_set_loopback(dev, features);
7005
7006         return 0;
7007 }
7008
7009 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7010                                int new_mtu)
7011 {
7012         dev->mtu = new_mtu;
7013
7014         if (new_mtu > ETH_DATA_LEN) {
7015                 if (tg3_flag(tp, 5780_CLASS)) {
7016                         netdev_update_features(dev);
7017                         tg3_flag_clear(tp, TSO_CAPABLE);
7018                 } else {
7019                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
7020                 }
7021         } else {
7022                 if (tg3_flag(tp, 5780_CLASS)) {
7023                         tg3_flag_set(tp, TSO_CAPABLE);
7024                         netdev_update_features(dev);
7025                 }
7026                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7027         }
7028 }
7029
7030 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7031 {
7032         struct tg3 *tp = netdev_priv(dev);
7033         int err;
7034
7035         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7036                 return -EINVAL;
7037
7038         if (!netif_running(dev)) {
7039                 /* We'll just catch it later when the
7040                  * device is brought up.
7041                  */
7042                 tg3_set_mtu(dev, tp, new_mtu);
7043                 return 0;
7044         }
7045
7046         tg3_phy_stop(tp);
7047
7048         tg3_netif_stop(tp);
7049
7050         tg3_full_lock(tp, 1);
7051
7052         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7053
7054         tg3_set_mtu(dev, tp, new_mtu);
7055
7056         err = tg3_restart_hw(tp, 0);
7057
7058         if (!err)
7059                 tg3_netif_start(tp);
7060
7061         tg3_full_unlock(tp);
7062
7063         if (!err)
7064                 tg3_phy_start(tp);
7065
7066         return err;
7067 }
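
/*
 * Usage note (illustrative): tg3_change_mtu() is installed as the
 * ndo_change_mtu hook, so an MTU change from user space, e.g.
 * "ip link set eth0 mtu 9000", reaches it under RTNL via the core
 * helper dev_set_mtu().  On 5780-class parts a jumbo MTU implicitly
 * disables TSO, per tg3_set_mtu() above.
 */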
7068
7069 static void tg3_rx_prodring_free(struct tg3 *tp,
7070                                  struct tg3_rx_prodring_set *tpr)
7071 {
7072         int i;
7073
7074         if (tpr != &tp->napi[0].prodring) {
7075                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7076                      i = (i + 1) & tp->rx_std_ring_mask)
7077                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7078                                         tp->rx_pkt_map_sz);
7079
7080                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7081                         for (i = tpr->rx_jmb_cons_idx;
7082                              i != tpr->rx_jmb_prod_idx;
7083                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7084                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7085                                                 TG3_RX_JMB_MAP_SZ);
7086                         }
7087                 }
7088
7089                 return;
7090         }
7091
7092         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7093                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7094                                 tp->rx_pkt_map_sz);
7095
7096         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7097                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7098                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7099                                         TG3_RX_JMB_MAP_SZ);
7100         }
7101 }
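
/*
 * Worked example (illustrative): the producer rings are power-of-two
 * sized, so "(i + 1) & tp->rx_std_ring_mask" wraps the index for free.
 * With a 512-entry standard ring the mask is 511; from i == 511 the
 * next index is (511 + 1) & 511 == 0.
 */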
7102
7103 /* Initialize rx rings for packet processing.
7104  *
7105  * The chip has been shut down and the driver detached from
7106  * the networking core, so no interrupts or new tx packets will
7107  * end up in the driver.  tp->{tx,}lock are held and thus
7108  * we may not sleep.
7109  */
7110 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7111                                  struct tg3_rx_prodring_set *tpr)
7112 {
7113         u32 i, rx_pkt_dma_sz;
7114
7115         tpr->rx_std_cons_idx = 0;
7116         tpr->rx_std_prod_idx = 0;
7117         tpr->rx_jmb_cons_idx = 0;
7118         tpr->rx_jmb_prod_idx = 0;
7119
7120         if (tpr != &tp->napi[0].prodring) {
7121                 memset(&tpr->rx_std_buffers[0], 0,
7122                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7123                 if (tpr->rx_jmb_buffers)
7124                         memset(&tpr->rx_jmb_buffers[0], 0,
7125                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7126                 goto done;
7127         }
7128
7129         /* Zero out all descriptors. */
7130         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7131
7132         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7133         if (tg3_flag(tp, 5780_CLASS) &&
7134             tp->dev->mtu > ETH_DATA_LEN)
7135                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7136         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7137
7138         /* Initialize invariants of the rings; we only set this
7139          * stuff once.  This works because the card does not
7140          * write into the rx buffer posting rings.
7141          */
7142         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7143                 struct tg3_rx_buffer_desc *rxd;
7144
7145                 rxd = &tpr->rx_std[i];
7146                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7147                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7148                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7149                                (i << RXD_OPAQUE_INDEX_SHIFT));
7150         }
7151
7152         /* Now allocate fresh SKBs for each rx ring. */
7153         for (i = 0; i < tp->rx_pending; i++) {
7154                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7155                         netdev_warn(tp->dev,
7156                                     "Using a smaller RX standard ring. Only "
7157                                     "%d out of %d buffers were allocated "
7158                                     "successfully\n", i, tp->rx_pending);
7159                         if (i == 0)
7160                                 goto initfail;
7161                         tp->rx_pending = i;
7162                         break;
7163                 }
7164         }
7165
7166         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7167                 goto done;
7168
7169         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7170
7171         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7172                 goto done;
7173
7174         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7175                 struct tg3_rx_buffer_desc *rxd;
7176
7177                 rxd = &tpr->rx_jmb[i].std;
7178                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7179                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7180                                   RXD_FLAG_JUMBO;
7181                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7182                                (i << RXD_OPAQUE_INDEX_SHIFT));
7183         }
7184
7185         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7186                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7187                         netdev_warn(tp->dev,
7188                                     "Using a smaller RX jumbo ring. Only %d "
7189                                     "out of %d buffers were allocated "
7190                                     "successfully\n", i, tp->rx_jumbo_pending);
7191                         if (i == 0)
7192                                 goto initfail;
7193                         tp->rx_jumbo_pending = i;
7194                         break;
7195                 }
7196         }
7197
7198 done:
7199         return 0;
7200
7201 initfail:
7202         tg3_rx_prodring_free(tp, tpr);
7203         return -ENOMEM;
7204 }
7205
7206 static void tg3_rx_prodring_fini(struct tg3 *tp,
7207                                  struct tg3_rx_prodring_set *tpr)
7208 {
7209         kfree(tpr->rx_std_buffers);
7210         tpr->rx_std_buffers = NULL;
7211         kfree(tpr->rx_jmb_buffers);
7212         tpr->rx_jmb_buffers = NULL;
7213         if (tpr->rx_std) {
7214                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7215                                   tpr->rx_std, tpr->rx_std_mapping);
7216                 tpr->rx_std = NULL;
7217         }
7218         if (tpr->rx_jmb) {
7219                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7220                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7221                 tpr->rx_jmb = NULL;
7222         }
7223 }
7224
7225 static int tg3_rx_prodring_init(struct tg3 *tp,
7226                                 struct tg3_rx_prodring_set *tpr)
7227 {
7228         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7229                                       GFP_KERNEL);
7230         if (!tpr->rx_std_buffers)
7231                 return -ENOMEM;
7232
7233         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7234                                          TG3_RX_STD_RING_BYTES(tp),
7235                                          &tpr->rx_std_mapping,
7236                                          GFP_KERNEL);
7237         if (!tpr->rx_std)
7238                 goto err_out;
7239
7240         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7241                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7242                                               GFP_KERNEL);
7243                 if (!tpr->rx_jmb_buffers)
7244                         goto err_out;
7245
7246                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7247                                                  TG3_RX_JMB_RING_BYTES(tp),
7248                                                  &tpr->rx_jmb_mapping,
7249                                                  GFP_KERNEL);
7250                 if (!tpr->rx_jmb)
7251                         goto err_out;
7252         }
7253
7254         return 0;
7255
7256 err_out:
7257         tg3_rx_prodring_fini(tp, tpr);
7258         return -ENOMEM;
7259 }
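
/*
 * Illustrative sketch (not driver code): the alloc/free pairing used by
 * tg3_rx_prodring_init()/tg3_rx_prodring_fini() is the standard
 * coherent-DMA pattern.  In isolation, with hypothetical "pdev"/"size":
 */
#if 0
	void *ring;
	dma_addr_t map;

	ring = dma_alloc_coherent(&pdev->dev, size, &map, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;
	/* ... hand "map" to the hardware, access "ring" from the CPU ... */
	dma_free_coherent(&pdev->dev, size, ring, map);
#endif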
7260
7261 /* Free up pending packets in all rx/tx rings.
7262  *
7263  * The chip has been shut down and the driver detached from
7264  * the networking core, so no interrupts or new tx packets will
7265  * end up in the driver.  tp->{tx,}lock is not held and we are not
7266  * in an interrupt context and thus may sleep.
7267  */
7268 static void tg3_free_rings(struct tg3 *tp)
7269 {
7270         int i, j;
7271
7272         for (j = 0; j < tp->irq_cnt; j++) {
7273                 struct tg3_napi *tnapi = &tp->napi[j];
7274
7275                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7276
7277                 if (!tnapi->tx_buffers)
7278                         continue;
7279
7280                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7281                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7282
7283                         if (!skb)
7284                                 continue;
7285
7286                         tg3_tx_skb_unmap(tnapi, i,
7287                                          skb_shinfo(skb)->nr_frags - 1);
7288
7289                         dev_kfree_skb_any(skb);
7290                 }
7291         }
7292 }
7293
7294 /* Initialize tx/rx rings for packet processing.
7295  *
7296  * The chip has been shut down and the driver detached from
7297  * the networking core, so no interrupts or new tx packets will
7298  * end up in the driver.  tp->{tx,}lock are held and thus
7299  * we may not sleep.
7300  */
7301 static int tg3_init_rings(struct tg3 *tp)
7302 {
7303         int i;
7304
7305         /* Free up all the SKBs. */
7306         tg3_free_rings(tp);
7307
7308         for (i = 0; i < tp->irq_cnt; i++) {
7309                 struct tg3_napi *tnapi = &tp->napi[i];
7310
7311                 tnapi->last_tag = 0;
7312                 tnapi->last_irq_tag = 0;
7313                 tnapi->hw_status->status = 0;
7314                 tnapi->hw_status->status_tag = 0;
7315                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7316
7317                 tnapi->tx_prod = 0;
7318                 tnapi->tx_cons = 0;
7319                 if (tnapi->tx_ring)
7320                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7321
7322                 tnapi->rx_rcb_ptr = 0;
7323                 if (tnapi->rx_rcb)
7324                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7325
7326                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7327                         tg3_free_rings(tp);
7328                         return -ENOMEM;
7329                 }
7330         }
7331
7332         return 0;
7333 }
7334
7335 /*
7336  * Must not be invoked with interrupt sources disabled and
7337  * the hardware shut down.
7338  */
7339 static void tg3_free_consistent(struct tg3 *tp)
7340 {
7341         int i;
7342
7343         for (i = 0; i < tp->irq_cnt; i++) {
7344                 struct tg3_napi *tnapi = &tp->napi[i];
7345
7346                 if (tnapi->tx_ring) {
7347                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7348                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7349                         tnapi->tx_ring = NULL;
7350                 }
7351
7352                 kfree(tnapi->tx_buffers);
7353                 tnapi->tx_buffers = NULL;
7354
7355                 if (tnapi->rx_rcb) {
7356                         dma_free_coherent(&tp->pdev->dev,
7357                                           TG3_RX_RCB_RING_BYTES(tp),
7358                                           tnapi->rx_rcb,
7359                                           tnapi->rx_rcb_mapping);
7360                         tnapi->rx_rcb = NULL;
7361                 }
7362
7363                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7364
7365                 if (tnapi->hw_status) {
7366                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7367                                           tnapi->hw_status,
7368                                           tnapi->status_mapping);
7369                         tnapi->hw_status = NULL;
7370                 }
7371         }
7372
7373         if (tp->hw_stats) {
7374                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7375                                   tp->hw_stats, tp->stats_mapping);
7376                 tp->hw_stats = NULL;
7377         }
7378 }
7379
7380 /*
7381  * Must not be invoked with interrupt sources disabled and
7382  * the hardware shut down.  Can sleep.
7383  */
7384 static int tg3_alloc_consistent(struct tg3 *tp)
7385 {
7386         int i;
7387
7388         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7389                                           sizeof(struct tg3_hw_stats),
7390                                           &tp->stats_mapping,
7391                                           GFP_KERNEL);
7392         if (!tp->hw_stats)
7393                 goto err_out;
7394
7395         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7396
7397         for (i = 0; i < tp->irq_cnt; i++) {
7398                 struct tg3_napi *tnapi = &tp->napi[i];
7399                 struct tg3_hw_status *sblk;
7400
7401                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7402                                                       TG3_HW_STATUS_SIZE,
7403                                                       &tnapi->status_mapping,
7404                                                       GFP_KERNEL);
7405                 if (!tnapi->hw_status)
7406                         goto err_out;
7407
7408                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7409                 sblk = tnapi->hw_status;
7410
7411                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7412                         goto err_out;
7413
7414                 /* If multivector TSS is enabled, vector 0 does not handle
7415                  * tx interrupts.  Don't allocate any resources for it.
7416                  */
7417                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7418                     (i && tg3_flag(tp, ENABLE_TSS))) {
7419                         tnapi->tx_buffers = kzalloc(
7420                                                sizeof(struct tg3_tx_ring_info) *
7421                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7422                         if (!tnapi->tx_buffers)
7423                                 goto err_out;
7424
7425                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7426                                                             TG3_TX_RING_BYTES,
7427                                                         &tnapi->tx_desc_mapping,
7428                                                             GFP_KERNEL);
7429                         if (!tnapi->tx_ring)
7430                                 goto err_out;
7431                 }
7432
7433                 /*
7434                  * When RSS is enabled, the status block format changes
7435                  * slightly.  The "rx_jumbo_consumer", "reserved",
7436                  * and "rx_mini_consumer" members get mapped to the
7437                  * other three rx return ring producer indexes.
7438                  */
7439                 switch (i) {
7440                 default:
7441                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7442                         break;
7443                 case 2:
7444                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7445                         break;
7446                 case 3:
7447                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7448                         break;
7449                 case 4:
7450                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7451                         break;
7452                 }
7453
7454                 /*
7455                  * If multivector RSS is enabled, vector 0 does not handle
7456                  * rx or tx interrupts.  Don't allocate any resources for it.
7457                  */
7458                 if (!i && tg3_flag(tp, ENABLE_RSS))
7459                         continue;
7460
7461                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7462                                                    TG3_RX_RCB_RING_BYTES(tp),
7463                                                    &tnapi->rx_rcb_mapping,
7464                                                    GFP_KERNEL);
7465                 if (!tnapi->rx_rcb)
7466                         goto err_out;
7467
7468                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7469         }
7470
7471         return 0;
7472
7473 err_out:
7474         tg3_free_consistent(tp);
7475         return -ENOMEM;
7476 }
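
/*
 * Note (illustrative): with RSS enabled, the per-vector rx return ring
 * producer index is read out of a reused status block field; e.g.
 * vector 2 uses sblk->rx_jumbo_consumer as its producer index despite
 * the "consumer" name.  The switch statement above encodes exactly
 * that remapping.
 */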
7477
7478 #define MAX_WAIT_CNT 1000
7479
7480 /* To stop a block, clear the enable bit and poll until it
7481  * clears.  tp->lock is held.
7482  */
7483 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7484 {
7485         unsigned int i;
7486         u32 val;
7487
7488         if (tg3_flag(tp, 5705_PLUS)) {
7489                 switch (ofs) {
7490                 case RCVLSC_MODE:
7491                 case DMAC_MODE:
7492                 case MBFREE_MODE:
7493                 case BUFMGR_MODE:
7494                 case MEMARB_MODE:
7495                         /* We can't enable/disable these bits on the
7496                          * 5705/5750; just report success.
7497                          */
7498                         return 0;
7499
7500                 default:
7501                         break;
7502                 }
7503         }
7504
7505         val = tr32(ofs);
7506         val &= ~enable_bit;
7507         tw32_f(ofs, val);
7508
7509         for (i = 0; i < MAX_WAIT_CNT; i++) {
7510                 udelay(100);
7511                 val = tr32(ofs);
7512                 if ((val & enable_bit) == 0)
7513                         break;
7514         }
7515
7516         if (i == MAX_WAIT_CNT && !silent) {
7517                 dev_err(&tp->pdev->dev,
7518                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7519                         ofs, enable_bit);
7520                 return -ENODEV;
7521         }
7522
7523         return 0;
7524 }
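
/*
 * Illustrative sketch (not driver code): stripped of the 5705/5750
 * special cases, tg3_stop_block() is the classic clear-and-poll
 * pattern:
 */
#if 0
	tw32_f(ofs, tr32(ofs) & ~enable_bit);	/* request stop, flush */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);			/* bounded wait, ~100ms max */
		if (!(tr32(ofs) & enable_bit))
			break;			/* block has stopped */
	}
#endif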
7525
7526 /* tp->lock is held. */
7527 static int tg3_abort_hw(struct tg3 *tp, int silent)
7528 {
7529         int i, err;
7530
7531         tg3_disable_ints(tp);
7532
7533         tp->rx_mode &= ~RX_MODE_ENABLE;
7534         tw32_f(MAC_RX_MODE, tp->rx_mode);
7535         udelay(10);
7536
7537         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7538         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7539         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7540         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7541         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7542         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7543
7544         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7545         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7546         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7547         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7548         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7549         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7550         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7551
7552         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7553         tw32_f(MAC_MODE, tp->mac_mode);
7554         udelay(40);
7555
7556         tp->tx_mode &= ~TX_MODE_ENABLE;
7557         tw32_f(MAC_TX_MODE, tp->tx_mode);
7558
7559         for (i = 0; i < MAX_WAIT_CNT; i++) {
7560                 udelay(100);
7561                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7562                         break;
7563         }
7564         if (i >= MAX_WAIT_CNT) {
7565                 dev_err(&tp->pdev->dev,
7566                         "%s timed out, TX_MODE_ENABLE will not clear "
7567                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7568                 err |= -ENODEV;
7569         }
7570
7571         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7572         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7573         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7574
7575         tw32(FTQ_RESET, 0xffffffff);
7576         tw32(FTQ_RESET, 0x00000000);
7577
7578         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7579         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7580
7581         for (i = 0; i < tp->irq_cnt; i++) {
7582                 struct tg3_napi *tnapi = &tp->napi[i];
7583                 if (tnapi->hw_status)
7584                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7585         }
7586         if (tp->hw_stats)
7587                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7588
7589         return err;
7590 }
7591
7592 /* Save PCI command register before chip reset */
7593 static void tg3_save_pci_state(struct tg3 *tp)
7594 {
7595         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7596 }
7597
7598 /* Restore PCI state after chip reset */
7599 static void tg3_restore_pci_state(struct tg3 *tp)
7600 {
7601         u32 val;
7602
7603         /* Re-enable indirect register accesses. */
7604         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7605                                tp->misc_host_ctrl);
7606
7607         /* Set MAX PCI retry to zero. */
7608         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7609         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7610             tg3_flag(tp, PCIX_MODE))
7611                 val |= PCISTATE_RETRY_SAME_DMA;
7612         /* Allow reads and writes to the APE register and memory space. */
7613         if (tg3_flag(tp, ENABLE_APE))
7614                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7615                        PCISTATE_ALLOW_APE_SHMEM_WR |
7616                        PCISTATE_ALLOW_APE_PSPACE_WR;
7617         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7618
7619         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7620
7621         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7622                 if (tg3_flag(tp, PCI_EXPRESS))
7623                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7624                 else {
7625                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7626                                               tp->pci_cacheline_sz);
7627                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7628                                               tp->pci_lat_timer);
7629                 }
7630         }
7631
7632         /* Make sure PCI-X relaxed ordering bit is clear. */
7633         if (tg3_flag(tp, PCIX_MODE)) {
7634                 u16 pcix_cmd;
7635
7636                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7637                                      &pcix_cmd);
7638                 pcix_cmd &= ~PCI_X_CMD_ERO;
7639                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7640                                       pcix_cmd);
7641         }
7642
7643         if (tg3_flag(tp, 5780_CLASS)) {
7644
7645                 /* Chip reset on 5780 will reset the MSI enable bit,
7646                  * so we need to restore it.
7647                  */
7648                 if (tg3_flag(tp, USING_MSI)) {
7649                         u16 ctrl;
7650
7651                         pci_read_config_word(tp->pdev,
7652                                              tp->msi_cap + PCI_MSI_FLAGS,
7653                                              &ctrl);
7654                         pci_write_config_word(tp->pdev,
7655                                               tp->msi_cap + PCI_MSI_FLAGS,
7656                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7657                         val = tr32(MSGINT_MODE);
7658                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7659                 }
7660         }
7661 }
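
/*
 * Note (illustrative): only PCI_COMMAND is captured by
 * tg3_save_pci_state(); everything else restored here is rebuilt from
 * fields cached in struct tg3 (misc_host_ctrl, pci_cacheline_sz, ...),
 * so no full pci_save_state()/pci_restore_state() round trip is needed
 * across the chip reset.
 */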
7662
7663 /* tp->lock is held. */
7664 static int tg3_chip_reset(struct tg3 *tp)
7665 {
7666         u32 val;
7667         void (*write_op)(struct tg3 *, u32, u32);
7668         int i, err;
7669
7670         tg3_nvram_lock(tp);
7671
7672         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7673
7674         /* No matching tg3_nvram_unlock() after this because
7675          * the chip reset below will undo the nvram lock.
7676          */
7677         tp->nvram_lock_cnt = 0;
7678
7679         /* GRC_MISC_CFG core clock reset will clear the memory
7680          * enable bit in PCI register 4 and the MSI enable bit
7681          * on some chips, so we save relevant registers here.
7682          */
7683         tg3_save_pci_state(tp);
7684
7685         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7686             tg3_flag(tp, 5755_PLUS))
7687                 tw32(GRC_FASTBOOT_PC, 0);
7688
7689         /*
7690          * We must avoid the readl() that normally takes place.
7691          * It can lock up machines, cause machine checks, and do
7692          * other fun things.  So, temporarily disable the 5701
7693          * hardware workaround while we do the reset.
7694          */
7695         write_op = tp->write32;
7696         if (write_op == tg3_write_flush_reg32)
7697                 tp->write32 = tg3_write32;
7698
7699         /* Prevent the irq handler from reading or writing PCI registers
7700          * during chip reset when the memory enable bit in the PCI command
7701          * register may be cleared.  The chip does not generate interrupts
7702          * at this time, but the irq handler may still be called due to irq
7703          * sharing or irqpoll.
7704          */
7705         tg3_flag_set(tp, CHIP_RESETTING);
7706         for (i = 0; i < tp->irq_cnt; i++) {
7707                 struct tg3_napi *tnapi = &tp->napi[i];
7708                 if (tnapi->hw_status) {
7709                         tnapi->hw_status->status = 0;
7710                         tnapi->hw_status->status_tag = 0;
7711                 }
7712                 tnapi->last_tag = 0;
7713                 tnapi->last_irq_tag = 0;
7714         }
7715         smp_mb();
7716
7717         for (i = 0; i < tp->irq_cnt; i++)
7718                 synchronize_irq(tp->napi[i].irq_vec);
7719
7720         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7721                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7722                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7723         }
7724
7725         /* do the reset */
7726         val = GRC_MISC_CFG_CORECLK_RESET;
7727
7728         if (tg3_flag(tp, PCI_EXPRESS)) {
7729                 /* Force PCIe 1.0a mode */
7730                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7731                     !tg3_flag(tp, 57765_PLUS) &&
7732                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7733                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7734                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7735
7736                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7737                         tw32(GRC_MISC_CFG, (1 << 29));
7738                         val |= (1 << 29);
7739                 }
7740         }
7741
7742         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7743                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7744                 tw32(GRC_VCPU_EXT_CTRL,
7745                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7746         }
7747
7748         /* Manage gphy power for all CPMU-absent PCIe devices. */
7749         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7750                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7751
7752         tw32(GRC_MISC_CFG, val);
7753
7754         /* restore 5701 hardware bug workaround write method */
7755         tp->write32 = write_op;
7756
7757         /* Unfortunately, we have to delay before the PCI read back.
7758          * Some 575X chips will not even respond to a PCI cfg access
7759          * when the reset command is given to the chip.
7760          *
7761          * How do these hardware designers expect things to work
7762          * properly if the PCI write is posted for a long period
7763          * of time?  It is always necessary to have some method by
7764          * which a register read back can occur to push out the
7765          * write that does the reset.
7766          *
7767          * For most tg3 variants the trick below was working.
7768          * Ho hum...
7769          */
7770         udelay(120);
7771
7772         /* Flush PCI posted writes.  The normal MMIO registers
7773          * are inaccessible at this time so this is the only
7774          * way to do this reliably (actually, this is no longer
7775          * the case, see above).  I tried to use indirect
7776          * register read/write but this upset some 5701 variants.
7777          */
7778         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7779
7780         udelay(120);
7781
7782         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7783                 u16 val16;
7784
7785                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7786                         int i;
7787                         u32 cfg_val;
7788
7789                         /* Wait for link training to complete.  */
7790                         for (i = 0; i < 5000; i++)
7791                                 udelay(100);
7792
7793                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7794                         pci_write_config_dword(tp->pdev, 0xc4,
7795                                                cfg_val | (1 << 15));
7796                 }
7797
7798                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7799                 pci_read_config_word(tp->pdev,
7800                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7801                                      &val16);
7802                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7803                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7804                 /*
7805                  * Older PCIe devices only support the 128 byte
7806                  * MPS setting.  Enforce the restriction.
7807                  */
7808                 if (!tg3_flag(tp, CPMU_PRESENT))
7809                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7810                 pci_write_config_word(tp->pdev,
7811                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7812                                       val16);
7813
7814                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7815
7816                 /* Clear error status */
7817                 pci_write_config_word(tp->pdev,
7818                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7819                                       PCI_EXP_DEVSTA_CED |
7820                                       PCI_EXP_DEVSTA_NFED |
7821                                       PCI_EXP_DEVSTA_FED |
7822                                       PCI_EXP_DEVSTA_URD);
7823         }
7824
7825         tg3_restore_pci_state(tp);
7826
7827         tg3_flag_clear(tp, CHIP_RESETTING);
7828         tg3_flag_clear(tp, ERROR_PROCESSED);
7829
7830         val = 0;
7831         if (tg3_flag(tp, 5780_CLASS))
7832                 val = tr32(MEMARB_MODE);
7833         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7834
7835         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7836                 tg3_stop_fw(tp);
7837                 tw32(0x5000, 0x400);
7838         }
7839
7840         tw32(GRC_MODE, tp->grc_mode);
7841
7842         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7843                 val = tr32(0xc4);
7844
7845                 tw32(0xc4, val | (1 << 15));
7846         }
7847
7848         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7849             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7850                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7851                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7852                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7853                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7854         }
7855
7856         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7857                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7858                 val = tp->mac_mode;
7859         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7860                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7861                 val = tp->mac_mode;
7862         } else
7863                 val = 0;
7864
7865         tw32_f(MAC_MODE, val);
7866         udelay(40);
7867
7868         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7869
7870         err = tg3_poll_fw(tp);
7871         if (err)
7872                 return err;
7873
7874         tg3_mdio_start(tp);
7875
7876         if (tg3_flag(tp, PCI_EXPRESS) &&
7877             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7878             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7879             !tg3_flag(tp, 57765_PLUS)) {
7880                 val = tr32(0x7c00);
7881
7882                 tw32(0x7c00, val | (1 << 25));
7883         }
7884
7885         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7886                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7887                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7888         }
7889
7890         /* Reprobe ASF enable state.  */
7891         tg3_flag_clear(tp, ENABLE_ASF);
7892         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7893         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7894         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7895                 u32 nic_cfg;
7896
7897                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7898                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7899                         tg3_flag_set(tp, ENABLE_ASF);
7900                         tp->last_event_jiffies = jiffies;
7901                         if (tg3_flag(tp, 5750_PLUS))
7902                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7903                 }
7904         }
7905
7906         return 0;
7907 }
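
/*
 * Note (illustrative): the pci_read_config_dword() in the middle of the
 * reset sequence above doubles as the posted-write flush.  An MMIO
 * readl() would be the usual way to flush, but the MMIO window is
 * unusable while the core clock reset is in flight, so config space is
 * the only safe path.
 */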
7908
7909 /* tp->lock is held. */
7910 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7911 {
7912         int err;
7913
7914         tg3_stop_fw(tp);
7915
7916         tg3_write_sig_pre_reset(tp, kind);
7917
7918         tg3_abort_hw(tp, silent);
7919         err = tg3_chip_reset(tp);
7920
7921         __tg3_set_mac_addr(tp, 0);
7922
7923         tg3_write_sig_legacy(tp, kind);
7924         tg3_write_sig_post_reset(tp, kind);
7925
7926         if (err)
7927                 return err;
7928
7929         return 0;
7930 }
7931
7932 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7933 {
7934         struct tg3 *tp = netdev_priv(dev);
7935         struct sockaddr *addr = p;
7936         int err = 0, skip_mac_1 = 0;
7937
7938         if (!is_valid_ether_addr(addr->sa_data))
7939                 return -EINVAL;
7940
7941         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7942
7943         if (!netif_running(dev))
7944                 return 0;
7945
7946         if (tg3_flag(tp, ENABLE_ASF)) {
7947                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7948
7949                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7950                 addr0_low = tr32(MAC_ADDR_0_LOW);
7951                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7952                 addr1_low = tr32(MAC_ADDR_1_LOW);
7953
7954                 /* Skip MAC addr 1 if ASF is using it. */
7955                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7956                     !(addr1_high == 0 && addr1_low == 0))
7957                         skip_mac_1 = 1;
7958         }
7959         spin_lock_bh(&tp->lock);
7960         __tg3_set_mac_addr(tp, skip_mac_1);
7961         spin_unlock_bh(&tp->lock);
7962
7963         return err;
7964 }
7965
7966 /* tp->lock is held. */
7967 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7968                            dma_addr_t mapping, u32 maxlen_flags,
7969                            u32 nic_addr)
7970 {
7971         tg3_write_mem(tp,
7972                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7973                       ((u64) mapping >> 32));
7974         tg3_write_mem(tp,
7975                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7976                       ((u64) mapping & 0xffffffff));
7977         tg3_write_mem(tp,
7978                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7979                        maxlen_flags);
7980
7981         if (!tg3_flag(tp, 5705_PLUS))
7982                 tg3_write_mem(tp,
7983                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7984                               nic_addr);
7985 }
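
/*
 * Worked example (illustrative): the 64-bit DMA address is split into
 * the two 32-bit halves the BDINFO layout expects.  For a mapping of
 * 0x123456780ULL:
 *
 *	(u64) mapping >> 32          == 0x00000001  (HOST_ADDR high)
 *	(u64) mapping & 0xffffffff   == 0x23456780  (HOST_ADDR low)
 */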
7986
7987 static void __tg3_set_rx_mode(struct net_device *);
7988 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7989 {
7990         int i;
7991
7992         if (!tg3_flag(tp, ENABLE_TSS)) {
7993                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7994                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7995                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7996         } else {
7997                 tw32(HOSTCC_TXCOL_TICKS, 0);
7998                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7999                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8000         }
8001
8002         if (!tg3_flag(tp, ENABLE_RSS)) {
8003                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8004                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8005                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8006         } else {
8007                 tw32(HOSTCC_RXCOL_TICKS, 0);
8008                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8009                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8010         }
8011
8012         if (!tg3_flag(tp, 5705_PLUS)) {
8013                 u32 val = ec->stats_block_coalesce_usecs;
8014
8015                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8016                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8017
8018                 if (!netif_carrier_ok(tp->dev))
8019                         val = 0;
8020
8021                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8022         }
8023
8024         for (i = 0; i < tp->irq_cnt - 1; i++) {
8025                 u32 reg;
8026
8027                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8028                 tw32(reg, ec->rx_coalesce_usecs);
8029                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8030                 tw32(reg, ec->rx_max_coalesced_frames);
8031                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8032                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8033
8034                 if (tg3_flag(tp, ENABLE_TSS)) {
8035                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8036                         tw32(reg, ec->tx_coalesce_usecs);
8037                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8038                         tw32(reg, ec->tx_max_coalesced_frames);
8039                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8040                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8041                 }
8042         }
8043
8044         for (; i < tp->irq_max - 1; i++) {
8045                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8046                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8047                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8048
8049                 if (tg3_flag(tp, ENABLE_TSS)) {
8050                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8051                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8052                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8053                 }
8054         }
8055 }
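
/*
 * Usage note (illustrative): these host coalescing registers are
 * programmed from the values user space sets with ethtool, e.g.
 * "ethtool -C eth0 rx-usecs 20 rx-frames 5".  With RSS (or TSS)
 * enabled, the vector-0 rx (or tx) registers are zeroed above because
 * vector 0 then carries no rx (or tx) traffic.
 */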
8056
8057 /* tp->lock is held. */
8058 static void tg3_rings_reset(struct tg3 *tp)
8059 {
8060         int i;
8061         u32 stblk, txrcb, rxrcb, limit;
8062         struct tg3_napi *tnapi = &tp->napi[0];
8063
8064         /* Disable all transmit rings but the first. */
8065         if (!tg3_flag(tp, 5705_PLUS))
8066                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8067         else if (tg3_flag(tp, 5717_PLUS))
8068                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8069         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8070                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8071         else
8072                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8073
8074         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8075              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8076                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8077                               BDINFO_FLAGS_DISABLED);
8078
8080         /* Disable all receive return rings but the first. */
8081         if (tg3_flag(tp, 5717_PLUS))
8082                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8083         else if (!tg3_flag(tp, 5705_PLUS))
8084                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8085         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8086                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8087                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8088         else
8089                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8090
8091         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8092              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8093                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8094                               BDINFO_FLAGS_DISABLED);
8095
8096         /* Disable interrupts */
8097         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8098         tp->napi[0].chk_msi_cnt = 0;
8099         tp->napi[0].last_rx_cons = 0;
8100         tp->napi[0].last_tx_cons = 0;
8101
8102         /* Zero mailbox registers. */
8103         if (tg3_flag(tp, SUPPORT_MSIX)) {
8104                 for (i = 1; i < tp->irq_max; i++) {
8105                         tp->napi[i].tx_prod = 0;
8106                         tp->napi[i].tx_cons = 0;
8107                         if (tg3_flag(tp, ENABLE_TSS))
8108                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8109                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8110                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8111                         tp->napi[i].chk_msi_cnt = 0;
8112                         tp->napi[i].last_rx_cons = 0;
8113                         tp->napi[i].last_tx_cons = 0;
8114                 }
8115                 if (!tg3_flag(tp, ENABLE_TSS))
8116                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8117         } else {
8118                 tp->napi[0].tx_prod = 0;
8119                 tp->napi[0].tx_cons = 0;
8120                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8121                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8122         }
8123
8124         /* Make sure the NIC-based send BD rings are disabled. */
8125         if (!tg3_flag(tp, 5705_PLUS)) {
8126                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8127                 for (i = 0; i < 16; i++)
8128                         tw32_tx_mbox(mbox + i * 8, 0);
8129         }
8130
8131         txrcb = NIC_SRAM_SEND_RCB;
8132         rxrcb = NIC_SRAM_RCV_RET_RCB;
8133
8134         /* Clear status block in ram. */
8135         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8136
8137         /* Set status block DMA address */
8138         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8139              ((u64) tnapi->status_mapping >> 32));
8140         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8141              ((u64) tnapi->status_mapping & 0xffffffff));
8142
8143         if (tnapi->tx_ring) {
8144                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8145                                (TG3_TX_RING_SIZE <<
8146                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8147                                NIC_SRAM_TX_BUFFER_DESC);
8148                 txrcb += TG3_BDINFO_SIZE;
8149         }
8150
8151         if (tnapi->rx_rcb) {
8152                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8153                                (tp->rx_ret_ring_mask + 1) <<
8154                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8155                 rxrcb += TG3_BDINFO_SIZE;
8156         }
8157
8158         stblk = HOSTCC_STATBLCK_RING1;
8159
8160         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8161                 u64 mapping = (u64)tnapi->status_mapping;
8162                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8163                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8164
8165                 /* Clear status block in ram. */
8166                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8167
8168                 if (tnapi->tx_ring) {
8169                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8170                                        (TG3_TX_RING_SIZE <<
8171                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8172                                        NIC_SRAM_TX_BUFFER_DESC);
8173                         txrcb += TG3_BDINFO_SIZE;
8174                 }
8175
8176                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8177                                ((tp->rx_ret_ring_mask + 1) <<
8178                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8179
8180                 stblk += 8;
8181                 rxrcb += TG3_BDINFO_SIZE;
8182         }
8183 }
8184
8185 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8186 {
8187         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8188
8189         if (!tg3_flag(tp, 5750_PLUS) ||
8190             tg3_flag(tp, 5780_CLASS) ||
8191             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8192             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8193                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8194         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8195                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8196                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8197         else
8198                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8199
8200         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8201         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8202
8203         val = min(nic_rep_thresh, host_rep_thresh);
8204         tw32(RCVBDI_STD_THRESH, val);
8205
8206         if (tg3_flag(tp, 57765_PLUS))
8207                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8208
8209         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8210                 return;
8211
8212         if (!tg3_flag(tp, 5705_PLUS))
8213                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8214         else
8215                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8216
8217         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8218
8219         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8220         tw32(RCVBDI_JUMBO_THRESH, val);
8221
8222         if (tg3_flag(tp, 57765_PLUS))
8223                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8224 }
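
/*
 * Worked example (illustrative): given rx_pending == 200,
 * host_rep_thresh = max(200 / 8, 1) = 25, and RCVBDI_STD_THRESH is
 * programmed with min(nic_rep_thresh, 25), i.e. the hardware is asked
 * to replenish well before the standard ring runs dry.
 */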
8225
8226 /* tp->lock is held. */
8227 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8228 {
8229         u32 val, rdmac_mode;
8230         int i, err, limit;
8231         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8232
8233         tg3_disable_ints(tp);
8234
8235         tg3_stop_fw(tp);
8236
8237         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8238
8239         if (tg3_flag(tp, INIT_COMPLETE))
8240                 tg3_abort_hw(tp, 1);
8241
8242         /* Enable MAC control of LPI */
8243         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8244                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8245                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8246                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8247
8248                 tw32_f(TG3_CPMU_EEE_CTRL,
8249                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8250
8251                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8252                       TG3_CPMU_EEEMD_LPI_IN_TX |
8253                       TG3_CPMU_EEEMD_LPI_IN_RX |
8254                       TG3_CPMU_EEEMD_EEE_ENABLE;
8255
8256                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8257                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8258
8259                 if (tg3_flag(tp, ENABLE_APE))
8260                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8261
8262                 tw32_f(TG3_CPMU_EEE_MODE, val);
8263
8264                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8265                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8266                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8267
8268                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8269                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8270                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8271         }
8272
8273         if (reset_phy)
8274                 tg3_phy_reset(tp);
8275
8276         err = tg3_chip_reset(tp);
8277         if (err)
8278                 return err;
8279
8280         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8281
8282         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8283                 val = tr32(TG3_CPMU_CTRL);
8284                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8285                 tw32(TG3_CPMU_CTRL, val);
8286
8287                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8288                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8289                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8290                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8291
8292                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8293                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8294                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8295                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8296
8297                 val = tr32(TG3_CPMU_HST_ACC);
8298                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8299                 val |= CPMU_HST_ACC_MACCLK_6_25;
8300                 tw32(TG3_CPMU_HST_ACC, val);
8301         }
8302
8303         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8304                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8305                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8306                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8307                 tw32(PCIE_PWR_MGMT_THRESH, val);
8308
8309                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8310                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8311
8312                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8313
8314                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8315                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8316         }
8317
8318         if (tg3_flag(tp, L1PLLPD_EN)) {
8319                 u32 grc_mode = tr32(GRC_MODE);
8320
8321                 /* Access the lower 1K of PL PCIE block registers. */
8322                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8323                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8324
8325                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8326                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8327                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8328
8329                 tw32(GRC_MODE, grc_mode);
8330         }
8331
8332         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8333                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8334                         u32 grc_mode = tr32(GRC_MODE);
8335
8336                         /* Access the lower 1K of PL PCIE block registers. */
8337                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8338                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8339
8340                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8341                                    TG3_PCIE_PL_LO_PHYCTL5);
8342                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8343                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8344
8345                         tw32(GRC_MODE, grc_mode);
8346                 }
8347
8348                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8349                         u32 grc_mode = tr32(GRC_MODE);
8350
8351                         /* Access the lower 1K of DL PCIE block registers. */
8352                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8353                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8354
8355                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8356                                    TG3_PCIE_DL_LO_FTSMAX);
8357                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8358                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8359                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8360
8361                         tw32(GRC_MODE, grc_mode);
8362                 }
8363
8364                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8365                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8366                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8367                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8368         }
8369
8370         /* This works around an issue with Athlon chipsets on
8371          * B3 tigon3 silicon.  This bit has no effect on any
8372          * other revision.  But do not set this on PCI Express
8373          * chips and don't even touch the clocks if the CPMU is present.
8374          */
8375         if (!tg3_flag(tp, CPMU_PRESENT)) {
8376                 if (!tg3_flag(tp, PCI_EXPRESS))
8377                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8378                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8379         }
8380
8381         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8382             tg3_flag(tp, PCIX_MODE)) {
8383                 val = tr32(TG3PCI_PCISTATE);
8384                 val |= PCISTATE_RETRY_SAME_DMA;
8385                 tw32(TG3PCI_PCISTATE, val);
8386         }
8387
8388         if (tg3_flag(tp, ENABLE_APE)) {
8389                 /* Allow reads and writes to the
8390                  * APE register and memory space.
8391                  */
8392                 val = tr32(TG3PCI_PCISTATE);
8393                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8394                        PCISTATE_ALLOW_APE_SHMEM_WR |
8395                        PCISTATE_ALLOW_APE_PSPACE_WR;
8396                 tw32(TG3PCI_PCISTATE, val);
8397         }
8398
8399         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8400                 /* Enable some hw fixes.  */
8401                 val = tr32(TG3PCI_MSI_DATA);
8402                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8403                 tw32(TG3PCI_MSI_DATA, val);
8404         }
8405
8406         /* Descriptor ring init may make accesses to the
8407          * NIC SRAM area to set up the TX descriptors, so we
8408          * can only do this after the hardware has been
8409          * successfully reset.
8410          */
8411         err = tg3_init_rings(tp);
8412         if (err)
8413                 return err;
8414
8415         if (tg3_flag(tp, 57765_PLUS)) {
8416                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8417                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8418                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8419                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8420                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8421                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8422                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8423                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8424         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8425                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8426                 /* This value is determined during the probe time DMA
8427                  * engine test, tg3_test_dma.
8428                  */
8429                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8430         }
8431
8432         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8433                           GRC_MODE_4X_NIC_SEND_RINGS |
8434                           GRC_MODE_NO_TX_PHDR_CSUM |
8435                           GRC_MODE_NO_RX_PHDR_CSUM);
8436         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8437
8438         /* Pseudo-header checksum is done by hardware logic and not
8439          * the offload processors, so make the chip do the pseudo-
8440          * header checksums on receive.  For transmit it is more
8441          * convenient to do the pseudo-header checksum in software
8442          * as Linux does that on transmit for us in all cases.
8443          */
8444         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8445
8446         tw32(GRC_MODE,
8447              tp->grc_mode |
8448              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8449
8450         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8451         val = tr32(GRC_MISC_CFG);
8452         val &= ~0xff;
8453         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8454         tw32(GRC_MISC_CFG, val);
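             /* A prescaler value of 65 appears to divide the 66 MHz core
              * clock down to a 1 MHz time base, i.e. 1 usec resolution,
              * presumably consumed by the host coalescing timers.
              */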
8455
8456         /* Initialize MBUF/DESC pool. */
8457         if (tg3_flag(tp, 5750_PLUS)) {
8458                 /* Do nothing.  */
8459         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8460                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8461                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8462                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8463                 else
8464                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8465                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8466                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8467         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8468                 int fw_len;
8469
8470                 fw_len = tp->fw_len;
8471                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
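                     /* fw_len is now rounded up to a 128-byte boundary
                      * (e.g. 0x1234 -> 0x1280) so that the 5705 mbuf pool,
                      * carved out directly after the firmware image below,
                      * starts on an aligned address.
                      */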
8472                 tw32(BUFMGR_MB_POOL_ADDR,
8473                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8474                 tw32(BUFMGR_MB_POOL_SIZE,
8475                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8476         }
8477
8478         if (tp->dev->mtu <= ETH_DATA_LEN) {
8479                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8480                      tp->bufmgr_config.mbuf_read_dma_low_water);
8481                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8482                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8483                 tw32(BUFMGR_MB_HIGH_WATER,
8484                      tp->bufmgr_config.mbuf_high_water);
8485         } else {
8486                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8487                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8488                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8489                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8490                 tw32(BUFMGR_MB_HIGH_WATER,
8491                      tp->bufmgr_config.mbuf_high_water_jumbo);
8492         }
8493         tw32(BUFMGR_DMA_LOW_WATER,
8494              tp->bufmgr_config.dma_low_water);
8495         tw32(BUFMGR_DMA_HIGH_WATER,
8496              tp->bufmgr_config.dma_high_water);
8497
8498         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8499         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8500                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8501         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8502             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8503             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8504                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8505         tw32(BUFMGR_MODE, val);
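             /* Poll up to 2000 * 10 usec = 20 msec for the enable bit to latch. */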
8506         for (i = 0; i < 2000; i++) {
8507                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8508                         break;
8509                 udelay(10);
8510         }
8511         if (i >= 2000) {
8512                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8513                 return -ENODEV;
8514         }
8515
8516         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8517                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8518
8519         tg3_setup_rxbd_thresholds(tp);
8520
8521         /* Initialize the TG3_BDINFOs at:
8522          *  RCVDBDI_STD_BD:     standard eth size rx ring
8523          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8524          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8525          *
8526          * like so:
8527          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8528          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8529          *                              ring attribute flags
8530          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8531          *
8532          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8533          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8534          *
8535          * The size of each ring is fixed in the firmware, but the location is
8536          * configurable.
8537          */
8538         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8539              ((u64) tpr->rx_std_mapping >> 32));
8540         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8541              ((u64) tpr->rx_std_mapping & 0xffffffff));
8542         if (!tg3_flag(tp, 5717_PLUS))
8543                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8544                      NIC_SRAM_RX_BUFFER_DESC);
8545
8546         /* Disable the mini ring */
8547         if (!tg3_flag(tp, 5705_PLUS))
8548                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8549                      BDINFO_FLAGS_DISABLED);
8550
8551         /* Program the jumbo buffer descriptor ring control
8552          * blocks on those devices that have them.
8553          */
8554         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8555             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8556
8557                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8558                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8559                              ((u64) tpr->rx_jmb_mapping >> 32));
8560                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8561                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8562                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8563                               BDINFO_FLAGS_MAXLEN_SHIFT;
8564                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8565                              val | BDINFO_FLAGS_USE_EXT_RECV);
8566                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8567                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8568                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8569                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8570                 } else {
8571                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8572                              BDINFO_FLAGS_DISABLED);
8573                 }
8574
8575                 if (tg3_flag(tp, 57765_PLUS)) {
8576                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8577                                 val = TG3_RX_STD_MAX_SIZE_5700;
8578                         else
8579                                 val = TG3_RX_STD_MAX_SIZE_5717;
8580                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8581                         val |= (TG3_RX_STD_DMA_SZ << 2);
8582                 } else
8583                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8584         } else
8585                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8586
8587         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8588
8589         tpr->rx_std_prod_idx = tp->rx_pending;
8590         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8591
8592         tpr->rx_jmb_prod_idx =
8593                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8594         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8595
8596         tg3_rings_reset(tp);
8597
8598         /* Initialize MAC address and backoff seed. */
8599         __tg3_set_mac_addr(tp, 0);
8600
8601         /* MTU + ethernet header + FCS + optional VLAN tag */
8602         tw32(MAC_RX_MTU_SIZE,
8603              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
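             /* For the default 1500-byte MTU this programs
              * 1500 + 14 + 4 + 4 = 1522 bytes as the largest accepted frame.
              */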
8604
8605         /* The slot time is changed by tg3_setup_phy if we
8606          * run at gigabit with half duplex.
8607          */
8608         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8609               (6 << TX_LENGTHS_IPG_SHIFT) |
8610               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8611
8612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8613                 val |= tr32(MAC_TX_LENGTHS) &
8614                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8615                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8616
8617         tw32(MAC_TX_LENGTHS, val);
8618
8619         /* Receive rules. */
8620         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8621         tw32(RCVLPC_CONFIG, 0x0181);
8622
8623         /* Calculate the RDMAC_MODE setting early; we need it to determine
8624          * the RCVLPC_STATE_ENABLE mask.
8625          */
8626         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8627                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8628                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8629                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8630                       RDMAC_MODE_LNGREAD_ENAB);
8631
8632         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8633                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8634
8635         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8636             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8637             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8638                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8639                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8640                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8641
8642         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8643             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8644                 if (tg3_flag(tp, TSO_CAPABLE) &&
8645                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8646                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8647                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8648                            !tg3_flag(tp, IS_5788)) {
8649                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8650                 }
8651         }
8652
8653         if (tg3_flag(tp, PCI_EXPRESS))
8654                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8655
8656         if (tg3_flag(tp, HW_TSO_1) ||
8657             tg3_flag(tp, HW_TSO_2) ||
8658             tg3_flag(tp, HW_TSO_3))
8659                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8660
8661         if (tg3_flag(tp, 57765_PLUS) ||
8662             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8663             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8664                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8665
8666         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8667                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8668
8669         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8670             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8671             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8672             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8673             tg3_flag(tp, 57765_PLUS)) {
8674                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8675                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8676                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8677                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8678                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8679                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8680                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8681                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8682                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8683                 }
8684                 tw32(TG3_RDMA_RSRVCTRL_REG,
8685                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8686         }
8687
8688         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8689             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8690                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8691                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8692                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8693                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8694         }
8695
8696         /* Receive/send statistics. */
8697         if (tg3_flag(tp, 5750_PLUS)) {
8698                 val = tr32(RCVLPC_STATS_ENABLE);
8699                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8700                 tw32(RCVLPC_STATS_ENABLE, val);
8701         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8702                    tg3_flag(tp, TSO_CAPABLE)) {
8703                 val = tr32(RCVLPC_STATS_ENABLE);
8704                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8705                 tw32(RCVLPC_STATS_ENABLE, val);
8706         } else {
8707                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8708         }
8709         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8710         tw32(SNDDATAI_STATSENAB, 0xffffff);
8711         tw32(SNDDATAI_STATSCTRL,
8712              (SNDDATAI_SCTRL_ENABLE |
8713               SNDDATAI_SCTRL_FASTUPD));
8714
8715         /* Setup host coalescing engine. */
8716         tw32(HOSTCC_MODE, 0);
8717         for (i = 0; i < 2000; i++) {
8718                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8719                         break;
8720                 udelay(10);
8721         }
8722
8723         __tg3_set_coalesce(tp, &tp->coal);
8724
8725         if (!tg3_flag(tp, 5705_PLUS)) {
8726                 /* Status/statistics block address.  See tg3_timer,
8727                  * the tg3_periodic_fetch_stats call there, and
8728                  * tg3_get_stats to see how this works for 5705/5750 chips.
8729                  */
8730                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8731                      ((u64) tp->stats_mapping >> 32));
8732                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8733                      ((u64) tp->stats_mapping & 0xffffffff));
8734                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8735
8736                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8737
8738                 /* Clear statistics and status block memory areas */
8739                 for (i = NIC_SRAM_STATS_BLK;
8740                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8741                      i += sizeof(u32)) {
8742                         tg3_write_mem(tp, i, 0);
8743                         udelay(40);
8744                 }
8745         }
8746
8747         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8748
8749         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8750         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8751         if (!tg3_flag(tp, 5705_PLUS))
8752                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8753
8754         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8755                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8756                 /* Reset to prevent intermittently losing the first rx packet */
8757                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8758                 udelay(10);
8759         }
8760
8761         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8762                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8763                         MAC_MODE_FHDE_ENABLE;
8764         if (tg3_flag(tp, ENABLE_APE))
8765                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8766         if (!tg3_flag(tp, 5705_PLUS) &&
8767             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8768             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8769                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8770         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8771         udelay(40);
8772
8773         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8774          * If TG3_FLAG_IS_NIC is zero, we should read the
8775          * register to preserve the GPIO settings for LOMs. The GPIOs,
8776          * whether used as inputs or outputs, are set by boot code after
8777          * reset.
8778          */
8779         if (!tg3_flag(tp, IS_NIC)) {
8780                 u32 gpio_mask;
8781
8782                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8783                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8784                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8785
8786                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8787                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8788                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8789
8790                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8791                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8792
8793                 tp->grc_local_ctrl &= ~gpio_mask;
8794                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8795
8796                 /* GPIO1 must be driven high for eeprom write protect */
8797                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8798                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8799                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8800         }
8801         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8802         udelay(100);
8803
8804         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8805                 val = tr32(MSGINT_MODE);
8806                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8807                 if (!tg3_flag(tp, 1SHOT_MSI))
8808                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8809                 tw32(MSGINT_MODE, val);
8810         }
8811
8812         if (!tg3_flag(tp, 5705_PLUS)) {
8813                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8814                 udelay(40);
8815         }
8816
8817         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8818                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8819                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8820                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8821                WDMAC_MODE_LNGREAD_ENAB);
8822
8823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8824             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8825                 if (tg3_flag(tp, TSO_CAPABLE) &&
8826                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8827                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8828                         /* nothing */
8829                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8830                            !tg3_flag(tp, IS_5788)) {
8831                         val |= WDMAC_MODE_RX_ACCEL;
8832                 }
8833         }
8834
8835         /* Enable host coalescing bug fix */
8836         if (tg3_flag(tp, 5755_PLUS))
8837                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8838
8839         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8840                 val |= WDMAC_MODE_BURST_ALL_DATA;
8841
8842         tw32_f(WDMAC_MODE, val);
8843         udelay(40);
8844
8845         if (tg3_flag(tp, PCIX_MODE)) {
8846                 u16 pcix_cmd;
8847
8848                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8849                                      &pcix_cmd);
8850                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8851                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8852                         pcix_cmd |= PCI_X_CMD_READ_2K;
8853                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8854                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8855                         pcix_cmd |= PCI_X_CMD_READ_2K;
8856                 }
8857                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8858                                       pcix_cmd);
8859         }
8860
8861         tw32_f(RDMAC_MODE, rdmac_mode);
8862         udelay(40);
8863
8864         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8865         if (!tg3_flag(tp, 5705_PLUS))
8866                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8867
8868         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8869                 tw32(SNDDATAC_MODE,
8870                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8871         else
8872                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8873
8874         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8875         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8876         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8877         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8878                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8879         tw32(RCVDBDI_MODE, val);
8880         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8881         if (tg3_flag(tp, HW_TSO_1) ||
8882             tg3_flag(tp, HW_TSO_2) ||
8883             tg3_flag(tp, HW_TSO_3))
8884                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8885         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8886         if (tg3_flag(tp, ENABLE_TSS))
8887                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8888         tw32(SNDBDI_MODE, val);
8889         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8890
8891         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8892                 err = tg3_load_5701_a0_firmware_fix(tp);
8893                 if (err)
8894                         return err;
8895         }
8896
8897         if (tg3_flag(tp, TSO_CAPABLE)) {
8898                 err = tg3_load_tso_firmware(tp);
8899                 if (err)
8900                         return err;
8901         }
8902
8903         tp->tx_mode = TX_MODE_ENABLE;
8904
8905         if (tg3_flag(tp, 5755_PLUS) ||
8906             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8907                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8908
8909         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8910                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8911                 tp->tx_mode &= ~val;
8912                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8913         }
8914
8915         tw32_f(MAC_TX_MODE, tp->tx_mode);
8916         udelay(100);
8917
8918         if (tg3_flag(tp, ENABLE_RSS)) {
8919                 int i = 0;
8920                 u32 reg = MAC_RSS_INDIR_TBL_0;
8921
8922                 if (tp->irq_cnt == 2) {
8923                         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8924                                 tw32(reg, 0x0);
8925                                 reg += 4;
8926                         }
8927                 } else {
8928                         u32 val;
8929
8930                         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8931                                 val = i % (tp->irq_cnt - 1);
8932                                 i++;
8933                                 for (; i % 8; i++) {
8934                                         val <<= 4;
8935                                         val |= (i % (tp->irq_cnt - 1));
8936                                 }
8937                                 tw32(reg, val);
8938                                 reg += 4;
8939                         }
8940                 }
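                     /* Each 32-bit indirection register packs eight 4-bit
                      * entries, each an rx queue index assigned round-robin
                      * across the irq_cnt - 1 rx vectors (vector 0 handles
                      * only link and error events).  With a single rx queue
                      * (irq_cnt == 2) every entry is queue 0, so the table
                      * is simply zeroed above.
                      */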
8941
8942                 /* Setup the "secret" hash key. */
8943                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8944                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8945                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8946                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8947                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8948                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8949                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8950                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8951                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8952                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8953         }
8954
8955         tp->rx_mode = RX_MODE_ENABLE;
8956         if (tg3_flag(tp, 5755_PLUS))
8957                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8958
8959         if (tg3_flag(tp, ENABLE_RSS))
8960                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8961                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8962                                RX_MODE_RSS_IPV6_HASH_EN |
8963                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8964                                RX_MODE_RSS_IPV4_HASH_EN |
8965                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8966
8967         tw32_f(MAC_RX_MODE, tp->rx_mode);
8968         udelay(10);
8969
8970         tw32(MAC_LED_CTRL, tp->led_ctrl);
8971
8972         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8973         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8974                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8975                 udelay(10);
8976         }
8977         tw32_f(MAC_RX_MODE, tp->rx_mode);
8978         udelay(10);
8979
8980         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8981                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8982                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8983                         /* Set drive transmission level to 1.2V  */
8984                         /* only if the signal pre-emphasis bit is not set  */
8985                         val = tr32(MAC_SERDES_CFG);
8986                         val &= 0xfffff000;
8987                         val |= 0x880;
8988                         tw32(MAC_SERDES_CFG, val);
8989                 }
8990                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8991                         tw32(MAC_SERDES_CFG, 0x616000);
8992         }
8993
8994         /* Prevent chip from dropping frames when flow control
8995          * is enabled.
8996          */
8997         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8998                 val = 1;
8999         else
9000                 val = 2;
9001         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9002
9003         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9004             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9005                 /* Use hardware link auto-negotiation */
9006                 tg3_flag_set(tp, HW_AUTONEG);
9007         }
9008
9009         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9010             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9011                 u32 tmp;
9012
9013                 tmp = tr32(SERDES_RX_CTRL);
9014                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9015                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9016                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9017                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9018         }
9019
9020         if (!tg3_flag(tp, USE_PHYLIB)) {
9021                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9022                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9023                         tp->link_config.speed = tp->link_config.orig_speed;
9024                         tp->link_config.duplex = tp->link_config.orig_duplex;
9025                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9026                 }
9027
9028                 err = tg3_setup_phy(tp, 0);
9029                 if (err)
9030                         return err;
9031
9032                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9033                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9034                         u32 tmp;
9035
9036                         /* Clear CRC stats. */
9037                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9038                                 tg3_writephy(tp, MII_TG3_TEST1,
9039                                              tmp | MII_TG3_TEST1_CRC_EN);
9040                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9041                         }
9042                 }
9043         }
9044
9045         __tg3_set_rx_mode(tp->dev);
9046
9047         /* Initialize receive rules. */
9048         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9049         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9050         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9051         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9052
9053         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9054                 limit = 8;
9055         else
9056                 limit = 16;
9057         if (tg3_flag(tp, ENABLE_ASF))
9058                 limit -= 4;
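             /* The switch below falls through intentionally: entering at
              * 'limit' clears every rule register from limit - 1 down to
              * rule 4.  Rules 0 and 1 were programmed above, rules 2 and 3
              * are left untouched, and ASF presumably claims the top four
              * rules, hence the limit -= 4 adjustment.
              */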
9059         switch (limit) {
9060         case 16:
9061                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9062         case 15:
9063                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9064         case 14:
9065                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9066         case 13:
9067                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9068         case 12:
9069                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9070         case 11:
9071                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9072         case 10:
9073                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9074         case 9:
9075                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9076         case 8:
9077                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9078         case 7:
9079                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9080         case 6:
9081                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9082         case 5:
9083                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9084         case 4:
9085                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9086         case 3:
9087                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9088         case 2:
9089         case 1:
9090
9091         default:
9092                 break;
9093         }
9094
9095         if (tg3_flag(tp, ENABLE_APE))
9096                 /* Write our heartbeat update interval to APE. */
9097                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9098                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9099
9100         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9101
9102         return 0;
9103 }
9104
9105 /* Called at device open time to get the chip ready for
9106  * packet processing.  Invoked with tp->lock held.
9107  */
9108 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9109 {
9110         tg3_switch_clocks(tp);
9111
9112         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9113
9114         return tg3_reset_hw(tp, reset_phy);
9115 }
9116
9117 #define TG3_STAT_ADD32(PSTAT, REG) \
9118 do {    u32 __val = tr32(REG); \
9119         (PSTAT)->low += __val; \
9120         if ((PSTAT)->low < __val) \
9121                 (PSTAT)->high += 1; \
9122 } while (0)
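     /* The MAC statistics registers are only 32 bits wide, so the periodic
      * fetch below accumulates them into 64-bit software counters.  The
      * carry test works because, after the unsigned add, (PSTAT)->low is
      * smaller than __val if and only if the 32-bit sum wrapped.
      */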
9123
9124 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9125 {
9126         struct tg3_hw_stats *sp = tp->hw_stats;
9127
9128         if (!netif_carrier_ok(tp->dev))
9129                 return;
9130
9131         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9132         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9133         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9134         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9135         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9136         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9137         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9138         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9139         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9140         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9141         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9142         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9143         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9144
9145         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9146         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9147         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9148         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9149         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9150         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9151         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9152         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9153         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9154         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9155         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9156         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9157         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9158         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9159
9160         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9161         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9162             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9163             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9164                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9165         } else {
9166                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9167                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9168                 if (val) {
9169                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9170                         sp->rx_discards.low += val;
9171                         if (sp->rx_discards.low < val)
9172                                 sp->rx_discards.high += 1;
9173                 }
9174                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9175         }
9176         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9177 }
9178
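     /* Work around lost MSIs: if a vector still has work pending but its
      * consumer indices have not moved since the previous timer tick,
      * assume the MSI was missed and invoke the handler directly.
      * chk_msi_cnt grants one tick of grace before the workaround fires.
      */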
9179 static void tg3_chk_missed_msi(struct tg3 *tp)
9180 {
9181         u32 i;
9182
9183         for (i = 0; i < tp->irq_cnt; i++) {
9184                 struct tg3_napi *tnapi = &tp->napi[i];
9185
9186                 if (tg3_has_work(tnapi)) {
9187                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9188                             tnapi->last_tx_cons == tnapi->tx_cons) {
9189                                 if (tnapi->chk_msi_cnt < 1) {
9190                                         tnapi->chk_msi_cnt++;
9191                                         return;
9192                                 }
9193                                 tg3_msi(0, tnapi);
9194                         }
9195                 }
9196                 tnapi->chk_msi_cnt = 0;
9197                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9198                 tnapi->last_tx_cons = tnapi->tx_cons;
9199         }
9200 }
9201
9202 static void tg3_timer(unsigned long __opaque)
9203 {
9204         struct tg3 *tp = (struct tg3 *) __opaque;
9205
9206         if (tp->irq_sync)
9207                 goto restart_timer;
9208
9209         spin_lock(&tp->lock);
9210
9211         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9212             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9213                 tg3_chk_missed_msi(tp);
9214
9215         if (!tg3_flag(tp, TAGGED_STATUS)) {
9216                 /* All of this garbage is because, when using non-tagged
9217                  * IRQ status, the mailbox/status_block protocol the chip
9218                  * uses with the cpu is race prone.
9219                  */
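                     /* If the chip posted a status block update but the
                      * interrupt may have been missed, force one via
                      * GRC_LCLCTRL_SETINT; otherwise kick the coalescing
                      * engine (HOSTCC_MODE_NOW) to DMA a fresh status block.
                      */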
9220                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9221                         tw32(GRC_LOCAL_CTRL,
9222                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9223                 } else {
9224                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9225                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9226                 }
9227
9228                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9229                         tg3_flag_set(tp, RESTART_TIMER);
9230                         spin_unlock(&tp->lock);
9231                         schedule_work(&tp->reset_task);
9232                         return;
9233                 }
9234         }
9235
9236         /* This part only runs once per second. */
9237         if (!--tp->timer_counter) {
9238                 if (tg3_flag(tp, 5705_PLUS))
9239                         tg3_periodic_fetch_stats(tp);
9240
9241                 if (tp->setlpicnt && !--tp->setlpicnt)
9242                         tg3_phy_eee_enable(tp);
9243
9244                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9245                         u32 mac_stat;
9246                         int phy_event;
9247
9248                         mac_stat = tr32(MAC_STATUS);
9249
9250                         phy_event = 0;
9251                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9252                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9253                                         phy_event = 1;
9254                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9255                                 phy_event = 1;
9256
9257                         if (phy_event)
9258                                 tg3_setup_phy(tp, 0);
9259                 } else if (tg3_flag(tp, POLL_SERDES)) {
9260                         u32 mac_stat = tr32(MAC_STATUS);
9261                         int need_setup = 0;
9262
9263                         if (netif_carrier_ok(tp->dev) &&
9264                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9265                                 need_setup = 1;
9266                         }
9267                         if (!netif_carrier_ok(tp->dev) &&
9268                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9269                                          MAC_STATUS_SIGNAL_DET))) {
9270                                 need_setup = 1;
9271                         }
9272                         if (need_setup) {
9273                                 if (!tp->serdes_counter) {
9274                                         tw32_f(MAC_MODE,
9275                                              (tp->mac_mode &
9276                                               ~MAC_MODE_PORT_MODE_MASK));
9277                                         udelay(40);
9278                                         tw32_f(MAC_MODE, tp->mac_mode);
9279                                         udelay(40);
9280                                 }
9281                                 tg3_setup_phy(tp, 0);
9282                         }
9283                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9284                            tg3_flag(tp, 5780_CLASS)) {
9285                         tg3_serdes_parallel_detect(tp);
9286                 }
9287
9288                 tp->timer_counter = tp->timer_multiplier;
9289         }
9290
9291         /* Heartbeat is only sent once every 2 seconds.
9292          *
9293          * The heartbeat is to tell the ASF firmware that the host
9294          * driver is still alive.  In the event that the OS crashes,
9295          * ASF needs to reset the hardware to free up the FIFO space
9296          * that may be filled with rx packets destined for the host.
9297          * If the FIFO is full, ASF will no longer function properly.
9298          *
9299          * Unintended resets have been reported on real time kernels
9300          * where the timer doesn't run on time.  Netpoll will also have
9301          * the same problem.
9302          *
9303          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9304          * to check the ring condition when the heartbeat is expiring
9305          * before doing the reset.  This will prevent most unintended
9306          * resets.
9307          */
9308         if (!--tp->asf_counter) {
9309                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9310                         tg3_wait_for_event_ack(tp);
9311
9312                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9313                                       FWCMD_NICDRV_ALIVE3);
9314                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9315                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9316                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9317
9318                         tg3_generate_fw_event(tp);
9319                 }
9320                 tp->asf_counter = tp->asf_multiplier;
9321         }
9322
9323         spin_unlock(&tp->lock);
9324
9325 restart_timer:
9326         tp->timer.expires = jiffies + tp->timer_offset;
9327         add_timer(&tp->timer);
9328 }
9329
9330 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9331 {
9332         irq_handler_t fn;
9333         unsigned long flags;
9334         char *name;
9335         struct tg3_napi *tnapi = &tp->napi[irq_num];
9336
9337         if (tp->irq_cnt == 1)
9338                 name = tp->dev->name;
9339         else {
9340                 name = &tnapi->irq_lbl[0];
9341                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9342                 name[IFNAMSIZ-1] = 0;
9343         }
9344
9345         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9346                 fn = tg3_msi;
9347                 if (tg3_flag(tp, 1SHOT_MSI))
9348                         fn = tg3_msi_1shot;
9349                 flags = 0;
9350         } else {
9351                 fn = tg3_interrupt;
9352                 if (tg3_flag(tp, TAGGED_STATUS))
9353                         fn = tg3_interrupt_tagged;
9354                 flags = IRQF_SHARED;
9355         }
9356
9357         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9358 }
9359
9360 static int tg3_test_interrupt(struct tg3 *tp)
9361 {
9362         struct tg3_napi *tnapi = &tp->napi[0];
9363         struct net_device *dev = tp->dev;
9364         int err, i, intr_ok = 0;
9365         u32 val;
9366
9367         if (!netif_running(dev))
9368                 return -ENODEV;
9369
9370         tg3_disable_ints(tp);
9371
9372         free_irq(tnapi->irq_vec, tnapi);
9373
9374         /*
9375          * Turn off MSI one shot mode.  Otherwise this test has no
9376          * observable way to know whether the interrupt was delivered.
9377          */
9378         if (tg3_flag(tp, 57765_PLUS)) {
9379                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9380                 tw32(MSGINT_MODE, val);
9381         }
9382
9383         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9384                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9385         if (err)
9386                 return err;
9387
9388         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9389         tg3_enable_ints(tp);
9390
9391         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9392                tnapi->coal_now);
9393
9394         for (i = 0; i < 5; i++) {
9395                 u32 int_mbox, misc_host_ctrl;
9396
9397                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9398                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9399
9400                 if ((int_mbox != 0) ||
9401                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9402                         intr_ok = 1;
9403                         break;
9404                 }
9405
9406                 if (tg3_flag(tp, 57765_PLUS) &&
9407                     tnapi->hw_status->status_tag != tnapi->last_tag)
9408                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9409
9410                 msleep(10);
9411         }
9412
9413         tg3_disable_ints(tp);
9414
9415         free_irq(tnapi->irq_vec, tnapi);
9416
9417         err = tg3_request_irq(tp, 0);
9418
9419         if (err)
9420                 return err;
9421
9422         if (intr_ok) {
9423                 /* Reenable MSI one shot mode. */
9424                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9425                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9426                         tw32(MSGINT_MODE, val);
9427                 }
9428                 return 0;
9429         }
9430
9431         return -EIO;
9432 }
9433
9434 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
9435  * mode is successfully restored.
9436  */
9437 static int tg3_test_msi(struct tg3 *tp)
9438 {
9439         int err;
9440         u16 pci_cmd;
9441
9442         if (!tg3_flag(tp, USING_MSI))
9443                 return 0;
9444
9445         /* Turn off SERR reporting in case MSI terminates with Master
9446          * Abort.
9447          */
9448         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9449         pci_write_config_word(tp->pdev, PCI_COMMAND,
9450                               pci_cmd & ~PCI_COMMAND_SERR);
9451
9452         err = tg3_test_interrupt(tp);
9453
9454         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9455
9456         if (!err)
9457                 return 0;
9458
9459         /* other failures */
9460         if (err != -EIO)
9461                 return err;
9462
9463         /* MSI test failed, go back to INTx mode */
9464         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9465                     "to INTx mode. Please report this failure to the PCI "
9466                     "maintainer and include system chipset information\n");
9467
9468         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9469
9470         pci_disable_msi(tp->pdev);
9471
9472         tg3_flag_clear(tp, USING_MSI);
9473         tp->napi[0].irq_vec = tp->pdev->irq;
9474
9475         err = tg3_request_irq(tp, 0);
9476         if (err)
9477                 return err;
9478
9479         /* Need to reset the chip because the MSI cycle may have terminated
9480          * with Master Abort.
9481          */
9482         tg3_full_lock(tp, 1);
9483
9484         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9485         err = tg3_init_hw(tp, 1);
9486
9487         tg3_full_unlock(tp);
9488
9489         if (err)
9490                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9491
9492         return err;
9493 }
9494
9495 static int tg3_request_firmware(struct tg3 *tp)
9496 {
9497         const __be32 *fw_data;
9498
9499         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9500                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9501                            tp->fw_needed);
9502                 return -ENOENT;
9503         }
9504
9505         fw_data = (void *)tp->fw->data;
9506
9507         /* Firmware blob starts with version numbers, followed by
9508          * start address and _full_ length including BSS sections
9509          * (which must be longer than the actual data, of course).
9510          */
9511
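             /* The 12-byte header is three be32 words: version, start
              * address, and full runtime length.  The declared length must
              * cover at least the file payload; any excess is BSS.
              */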
9512         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9513         if (tp->fw_len < (tp->fw->size - 12)) {
9514                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9515                            tp->fw_len, tp->fw_needed);
9516                 release_firmware(tp->fw);
9517                 tp->fw = NULL;
9518                 return -EINVAL;
9519         }
9520
9521         /* We no longer need firmware; we have it. */
9522         tp->fw_needed = NULL;
9523         return 0;
9524 }
9525
9526 static bool tg3_enable_msix(struct tg3 *tp)
9527 {
9528         int i, rc, cpus = num_online_cpus();
9529         struct msix_entry msix_ent[tp->irq_max];
9530
9531         if (cpus == 1)
9532                 /* Just fall back to the simpler MSI mode. */
9533                 return false;
9534
9535         /*
9536          * We want as many rx rings enabled as there are cpus.
9537          * The first MSIX vector only deals with link interrupts, etc,
9538          * so we add one to the number of vectors we are requesting.
9539          */
9540         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9541
9542         for (i = 0; i < tp->irq_max; i++) {
9543                 msix_ent[i].entry  = i;
9544                 msix_ent[i].vector = 0;
9545         }
9546
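             /* pci_enable_msix() returns 0 on success, a negative errno on
              * hard failure, or a positive count of the vectors actually
              * available, in which case we retry with that smaller count.
              */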
9547         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9548         if (rc < 0) {
9549                 return false;
9550         } else if (rc != 0) {
9551                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9552                         return false;
9553                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9554                               tp->irq_cnt, rc);
9555                 tp->irq_cnt = rc;
9556         }
9557
9558         for (i = 0; i < tp->irq_max; i++)
9559                 tp->napi[i].irq_vec = msix_ent[i].vector;
9560
9561         netif_set_real_num_tx_queues(tp->dev, 1);
9562         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9563         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9564                 pci_disable_msix(tp->pdev);
9565                 return false;
9566         }
9567
9568         if (tp->irq_cnt > 1) {
9569                 tg3_flag_set(tp, ENABLE_RSS);
9570
9571                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9572                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9573                         tg3_flag_set(tp, ENABLE_TSS);
9574                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9575                 }
9576         }
9577
9578         return true;
9579 }
9580
9581 static void tg3_ints_init(struct tg3 *tp)
9582 {
9583         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9584             !tg3_flag(tp, TAGGED_STATUS)) {
9585                 /* All MSI-supporting chips should support tagged
9586                  * status.  Assert that this is the case.
9587                  */
9588                 netdev_warn(tp->dev,
9589                             "MSI without TAGGED_STATUS? Not using MSI\n");
9590                 goto defcfg;
9591         }
9592
9593         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9594                 tg3_flag_set(tp, USING_MSIX);
9595         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9596                 tg3_flag_set(tp, USING_MSI);
9597
9598         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9599                 u32 msi_mode = tr32(MSGINT_MODE);
9600                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9601                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9602                 if (!tg3_flag(tp, 1SHOT_MSI))
9603                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9604                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9605         }
9606 defcfg:
9607         if (!tg3_flag(tp, USING_MSIX)) {
9608                 tp->irq_cnt = 1;
9609                 tp->napi[0].irq_vec = tp->pdev->irq;
9610                 netif_set_real_num_tx_queues(tp->dev, 1);
9611                 netif_set_real_num_rx_queues(tp->dev, 1);
9612         }
9613 }
9614
9615 static void tg3_ints_fini(struct tg3 *tp)
9616 {
9617         if (tg3_flag(tp, USING_MSIX))
9618                 pci_disable_msix(tp->pdev);
9619         else if (tg3_flag(tp, USING_MSI))
9620                 pci_disable_msi(tp->pdev);
9621         tg3_flag_clear(tp, USING_MSI);
9622         tg3_flag_clear(tp, USING_MSIX);
9623         tg3_flag_clear(tp, ENABLE_RSS);
9624         tg3_flag_clear(tp, ENABLE_TSS);
9625 }
9626
9627 static int tg3_open(struct net_device *dev)
9628 {
9629         struct tg3 *tp = netdev_priv(dev);
9630         int i, err;
9631
9632         if (tp->fw_needed) {
9633                 err = tg3_request_firmware(tp);
9634                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9635                         if (err)
9636                                 return err;
9637                 } else if (err) {
9638                         netdev_warn(tp->dev, "TSO capability disabled\n");
9639                         tg3_flag_clear(tp, TSO_CAPABLE);
9640                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9641                         netdev_notice(tp->dev, "TSO capability restored\n");
9642                         tg3_flag_set(tp, TSO_CAPABLE);
9643                 }
9644         }
9645
9646         netif_carrier_off(tp->dev);
9647
9648         err = tg3_power_up(tp);
9649         if (err)
9650                 return err;
9651
9652         tg3_full_lock(tp, 0);
9653
9654         tg3_disable_ints(tp);
9655         tg3_flag_clear(tp, INIT_COMPLETE);
9656
9657         tg3_full_unlock(tp);
9658
9659         /*
9660          * Setup interrupts first so we know how
9661          * many NAPI resources to allocate
9662          */
9663         tg3_ints_init(tp);
9664
9665         /* The placement of this call is tied
9666          * to the setup and use of Host TX descriptors.
9667          */
9668         err = tg3_alloc_consistent(tp);
9669         if (err)
9670                 goto err_out1;
9671
9672         tg3_napi_init(tp);
9673
9674         tg3_napi_enable(tp);
9675
9676         for (i = 0; i < tp->irq_cnt; i++) {
9677                 struct tg3_napi *tnapi = &tp->napi[i];
9678                 err = tg3_request_irq(tp, i);
9679                 if (err) {
9680                         for (i--; i >= 0; i--) {
9681                                 tnapi = &tp->napi[i];
9682                                 free_irq(tnapi->irq_vec, tnapi);
9683                         }
9684                         goto err_out2;
9685                 }
9686         }
9687
9688         tg3_full_lock(tp, 0);
9689
9690         err = tg3_init_hw(tp, 1);
9691         if (err) {
9692                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9693                 tg3_free_rings(tp);
9694         } else {
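                /*
                 * Pick the housekeeping timer cadence: with tagged
                 * status blocks (and outside the 5717/57765 classes)
                 * a one second poll suffices; otherwise tg3_timer()
                 * must run ten times a second.
                 */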
9695                 if (tg3_flag(tp, TAGGED_STATUS) &&
9696                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9697                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9698                         tp->timer_offset = HZ;
9699                 else
9700                         tp->timer_offset = HZ / 10;
9701
9702                 BUG_ON(tp->timer_offset > HZ);
9703                 tp->timer_counter = tp->timer_multiplier =
9704                         (HZ / tp->timer_offset);
9705                 tp->asf_counter = tp->asf_multiplier =
9706                         ((HZ / tp->timer_offset) * 2);
9707
9708                 init_timer(&tp->timer);
9709                 tp->timer.expires = jiffies + tp->timer_offset;
9710                 tp->timer.data = (unsigned long) tp;
9711                 tp->timer.function = tg3_timer;
9712         }
9713
9714         tg3_full_unlock(tp);
9715
9716         if (err)
9717                 goto err_out3;
9718
9719         if (tg3_flag(tp, USING_MSI)) {
9720                 err = tg3_test_msi(tp);
9721
9722                 if (err) {
9723                         tg3_full_lock(tp, 0);
9724                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9725                         tg3_free_rings(tp);
9726                         tg3_full_unlock(tp);
9727
9728                         goto err_out2;
9729                 }
9730
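                /*
                 * MSI passed its self-test.  On chips before the 57765
                 * class, also enable one-shot MSI in the PCIe
                 * transaction config register.
                 */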
9731                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9732                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9733
9734                         tw32(PCIE_TRANSACTION_CFG,
9735                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9736                 }
9737         }
9738
9739         tg3_phy_start(tp);
9740
9741         tg3_full_lock(tp, 0);
9742
9743         add_timer(&tp->timer);
9744         tg3_flag_set(tp, INIT_COMPLETE);
9745         tg3_enable_ints(tp);
9746
9747         tg3_full_unlock(tp);
9748
9749         netif_tx_start_all_queues(dev);
9750
9751         /*
9752          * Reset the loopback feature if it was turned on while the
9753          * device was down; make sure it is reinstated properly now.
9754          */
9755         if (dev->features & NETIF_F_LOOPBACK)
9756                 tg3_set_loopback(dev, dev->features);
9757
9758         return 0;
9759
9760 err_out3:
9761         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9762                 struct tg3_napi *tnapi = &tp->napi[i];
9763                 free_irq(tnapi->irq_vec, tnapi);
9764         }
9765
9766 err_out2:
9767         tg3_napi_disable(tp);
9768         tg3_napi_fini(tp);
9769         tg3_free_consistent(tp);
9770
9771 err_out1:
9772         tg3_ints_fini(tp);
9773         tg3_frob_aux_power(tp, false);
9774         pci_set_power_state(tp->pdev, PCI_D3hot);
9775         return err;
9776 }
9777
9778 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9779                                                  struct rtnl_link_stats64 *);
9780 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9781
9782 static int tg3_close(struct net_device *dev)
9783 {
9784         int i;
9785         struct tg3 *tp = netdev_priv(dev);
9786
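        /*
         * Tear down in roughly the reverse order of tg3_open(): quiesce
         * NAPI and the timer, halt the chip under the full lock, then
         * release IRQs, interrupt vectors and DMA memory.
         */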
9787         tg3_napi_disable(tp);
9788         cancel_work_sync(&tp->reset_task);
9789
9790         netif_tx_stop_all_queues(dev);
9791
9792         del_timer_sync(&tp->timer);
9793
9794         tg3_phy_stop(tp);
9795
9796         tg3_full_lock(tp, 1);
9797
9798         tg3_disable_ints(tp);
9799
9800         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9801         tg3_free_rings(tp);
9802         tg3_flag_clear(tp, INIT_COMPLETE);
9803
9804         tg3_full_unlock(tp);
9805
9806         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9807                 struct tg3_napi *tnapi = &tp->napi[i];
9808                 free_irq(tnapi->irq_vec, tnapi);
9809         }
9810
9811         tg3_ints_fini(tp);
9812
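        /*
         * Snapshot the final counters.  While the device is down,
         * tg3_get_stats64() and tg3_get_estats() serve these totals
         * directly, and after the next open they are folded into the
         * fresh hardware counts.
         */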
9813         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9814
9815         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9816                sizeof(tp->estats_prev));
9817
9818         tg3_napi_fini(tp);
9819
9820         tg3_free_consistent(tp);
9821
9822         tg3_power_down(tp);
9823
9824         netif_carrier_off(tp->dev);
9825
9826         return 0;
9827 }
9828
9829 static inline u64 get_stat64(tg3_stat64_t *val)
9830 {
9831        return ((u64)val->high << 32) | ((u64)val->low);
9832 }
9833
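/* On 5700/5701 with a copper PHY, RX CRC errors are accumulated from
 * the PHY's own counter (enabled through MII_TG3_TEST1, read via
 * MII_TG3_RXR_COUNTERS) instead of the MAC statistics block;
 * presumably the MAC's FCS counter cannot be trusted on those chips.
 */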
9834 static u64 calc_crc_errors(struct tg3 *tp)
9835 {
9836         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9837
9838         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9839             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9840              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9841                 u32 val;
9842
9843                 spin_lock_bh(&tp->lock);
9844                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9845                         tg3_writephy(tp, MII_TG3_TEST1,
9846                                      val | MII_TG3_TEST1_CRC_EN);
9847                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9848                 } else
9849                         val = 0;
9850                 spin_unlock_bh(&tp->lock);
9851
9852                 tp->phy_crc_errors += val;
9853
9854                 return tp->phy_crc_errors;
9855         }
9856
9857         return get_stat64(&hw_stats->rx_fcs_errors);
9858 }
9859
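/* Fold the live hardware counter for @member on top of the snapshot
 * taken at the last tg3_close(), so the totals reported via ethtool
 * survive an if-down/if-up cycle.
 */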
9860 #define ESTAT_ADD(member) \
9861         estats->member =        old_estats->member + \
9862                                 get_stat64(&hw_stats->member)
9863
9864 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9865 {
9866         struct tg3_ethtool_stats *estats = &tp->estats;
9867         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9868         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9869
9870         if (!hw_stats)
9871                 return old_estats;
9872
9873         ESTAT_ADD(rx_octets);
9874         ESTAT_ADD(rx_fragments);
9875         ESTAT_ADD(rx_ucast_packets);
9876         ESTAT_ADD(rx_mcast_packets);
9877         ESTAT_ADD(rx_bcast_packets);
9878         ESTAT_ADD(rx_fcs_errors);
9879         ESTAT_ADD(rx_align_errors);
9880         ESTAT_ADD(rx_xon_pause_rcvd);
9881         ESTAT_ADD(rx_xoff_pause_rcvd);
9882         ESTAT_ADD(rx_mac_ctrl_rcvd);
9883         ESTAT_ADD(rx_xoff_entered);
9884         ESTAT_ADD(rx_frame_too_long_errors);
9885         ESTAT_ADD(rx_jabbers);
9886         ESTAT_ADD(rx_undersize_packets);
9887         ESTAT_ADD(rx_in_length_errors);
9888         ESTAT_ADD(rx_out_length_errors);
9889         ESTAT_ADD(rx_64_or_less_octet_packets);
9890         ESTAT_ADD(rx_65_to_127_octet_packets);
9891         ESTAT_ADD(rx_128_to_255_octet_packets);
9892         ESTAT_ADD(rx_256_to_511_octet_packets);
9893         ESTAT_ADD(rx_512_to_1023_octet_packets);
9894         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9895         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9896         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9897         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9898         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9899
9900         ESTAT_ADD(tx_octets);
9901         ESTAT_ADD(tx_collisions);
9902         ESTAT_ADD(tx_xon_sent);
9903         ESTAT_ADD(tx_xoff_sent);
9904         ESTAT_ADD(tx_flow_control);
9905         ESTAT_ADD(tx_mac_errors);
9906         ESTAT_ADD(tx_single_collisions);
9907         ESTAT_ADD(tx_mult_collisions);
9908         ESTAT_ADD(tx_deferred);
9909         ESTAT_ADD(tx_excessive_collisions);
9910         ESTAT_ADD(tx_late_collisions);
9911         ESTAT_ADD(tx_collide_2times);
9912         ESTAT_ADD(tx_collide_3times);
9913         ESTAT_ADD(tx_collide_4times);
9914         ESTAT_ADD(tx_collide_5times);
9915         ESTAT_ADD(tx_collide_6times);
9916         ESTAT_ADD(tx_collide_7times);
9917         ESTAT_ADD(tx_collide_8times);
9918         ESTAT_ADD(tx_collide_9times);
9919         ESTAT_ADD(tx_collide_10times);
9920         ESTAT_ADD(tx_collide_11times);
9921         ESTAT_ADD(tx_collide_12times);
9922         ESTAT_ADD(tx_collide_13times);
9923         ESTAT_ADD(tx_collide_14times);
9924         ESTAT_ADD(tx_collide_15times);
9925         ESTAT_ADD(tx_ucast_packets);
9926         ESTAT_ADD(tx_mcast_packets);
9927         ESTAT_ADD(tx_bcast_packets);
9928         ESTAT_ADD(tx_carrier_sense_errors);
9929         ESTAT_ADD(tx_discards);
9930         ESTAT_ADD(tx_errors);
9931
9932         ESTAT_ADD(dma_writeq_full);
9933         ESTAT_ADD(dma_write_prioq_full);
9934         ESTAT_ADD(rxbds_empty);
9935         ESTAT_ADD(rx_discards);
9936         ESTAT_ADD(rx_errors);
9937         ESTAT_ADD(rx_threshold_hit);
9938
9939         ESTAT_ADD(dma_readq_full);
9940         ESTAT_ADD(dma_read_prioq_full);
9941         ESTAT_ADD(tx_comp_queue_full);
9942
9943         ESTAT_ADD(ring_set_send_prod_index);
9944         ESTAT_ADD(ring_status_update);
9945         ESTAT_ADD(nic_irqs);
9946         ESTAT_ADD(nic_avoided_irqs);
9947         ESTAT_ADD(nic_tx_threshold_hit);
9948
9949         ESTAT_ADD(mbuf_lwm_thresh_hit);
9950
9951         return estats;
9952 }
9953
9954 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9955                                                  struct rtnl_link_stats64 *stats)
9956 {
9957         struct tg3 *tp = netdev_priv(dev);
9958         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9959         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9960
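        /* hw_stats goes away once the device is closed; fall back to
         * the snapshot saved by tg3_close() in that case.
         */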
9961         if (!hw_stats)
9962                 return old_stats;
9963
9964         stats->rx_packets = old_stats->rx_packets +
9965                 get_stat64(&hw_stats->rx_ucast_packets) +
9966                 get_stat64(&hw_stats->rx_mcast_packets) +
9967                 get_stat64(&hw_stats->rx_bcast_packets);
9968
9969         stats->tx_packets = old_stats->tx_packets +
9970                 get_stat64(&hw_stats->tx_ucast_packets) +
9971                 get_stat64(&hw_stats->tx_mcast_packets) +
9972                 get_stat64(&hw_stats->tx_bcast_packets);
9973
9974         stats->rx_bytes = old_stats->rx_bytes +
9975                 get_stat64(&hw_stats->rx_octets);
9976         stats->tx_bytes = old_stats->tx_bytes +
9977                 get_stat64(&hw_stats->tx_octets);
9978
9979         stats->rx_errors = old_stats->rx_errors +
9980                 get_stat64(&hw_stats->rx_errors);
9981         stats->tx_errors = old_stats->tx_errors +
9982                 get_stat64(&hw_stats->tx_errors) +
9983                 get_stat64(&hw_stats->tx_mac_errors) +
9984                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9985                 get_stat64(&hw_stats->tx_discards);
9986
9987         stats->multicast = old_stats->multicast +
9988                 get_stat64(&hw_stats->rx_mcast_packets);
9989         stats->collisions = old_stats->collisions +
9990                 get_stat64(&hw_stats->tx_collisions);
9991
9992         stats->rx_length_errors = old_stats->rx_length_errors +
9993                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9994                 get_stat64(&hw_stats->rx_undersize_packets);
9995
9996         stats->rx_over_errors = old_stats->rx_over_errors +
9997                 get_stat64(&hw_stats->rxbds_empty);
9998         stats->rx_frame_errors = old_stats->rx_frame_errors +
9999                 get_stat64(&hw_stats->rx_align_errors);
10000         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10001                 get_stat64(&hw_stats->tx_discards);
10002         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10003                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10004
10005         stats->rx_crc_errors = old_stats->rx_crc_errors +
10006                 calc_crc_errors(tp);
10007
10008         stats->rx_missed_errors = old_stats->rx_missed_errors +
10009                 get_stat64(&hw_stats->rx_discards);
10010
10011         stats->rx_dropped = tp->rx_dropped;
10012         stats->tx_dropped = tp->tx_dropped;
10013
10014         return stats;
10015 }
10016
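/* Standard bit-reflected CRC-32 (IEEE 802.3 polynomial, reflected form
 * 0xedb88320, LSB first) over a byte buffer, with the usual all-ones
 * preset and final inversion.  The multicast hash computation in
 * __tg3_set_rx_mode() below depends on these exact conventions.
 */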
10017 static inline u32 calc_crc(unsigned char *buf, int len)
10018 {
10019         u32 reg;
10020         u32 tmp;
10021         int j, k;
10022
10023         reg = 0xffffffff;
10024
10025         for (j = 0; j < len; j++) {
10026                 reg ^= buf[j];
10027
10028                 for (k = 0; k < 8; k++) {
10029                         tmp = reg & 0x01;
10030
10031                         reg >>= 1;
10032
10033                         if (tmp)
10034                                 reg ^= 0xedb88320;
10035                 }
10036         }
10037
10038         return ~reg;
10039 }
10040
10041 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10042 {
10043         /* accept or reject all multicast frames */
10044         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10045         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10046         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10047         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10048 }
10049
10050 static void __tg3_set_rx_mode(struct net_device *dev)
10051 {
10052         struct tg3 *tp = netdev_priv(dev);
10053         u32 rx_mode;
10054
10055         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10056                                   RX_MODE_KEEP_VLAN_TAG);
10057
10058 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10059         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10060          * flag clear.
10061          */
10062         if (!tg3_flag(tp, ENABLE_ASF))
10063                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10064 #endif
10065
10066         if (dev->flags & IFF_PROMISC) {
10067                 /* Promiscuous mode. */
10068                 rx_mode |= RX_MODE_PROMISC;
10069         } else if (dev->flags & IFF_ALLMULTI) {
10070                 /* Accept all multicast. */
10071                 tg3_set_multi(tp, 1);
10072         } else if (netdev_mc_empty(dev)) {
10073                 /* Reject all multicast. */
10074                 tg3_set_multi(tp, 0);
10075         } else {
10076                 /* Accept one or more multicast addresses. */
10077                 struct netdev_hw_addr *ha;
10078                 u32 mc_filter[4] = { 0, };
10079                 u32 regidx;
10080                 u32 bit;
10081                 u32 crc;
10082
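                /* The MAC hashes a multicast address on the low 7 bits
                 * of the raw CRC-32 remainder.  calc_crc() returns the
                 * inverted remainder, so ~crc undoes that; bits 6:5
                 * then select one of the four 32-bit hash registers
                 * and bits 4:0 the bit within it.
                 */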
10083                 netdev_for_each_mc_addr(ha, dev) {
10084                         crc = calc_crc(ha->addr, ETH_ALEN);
10085                         bit = ~crc & 0x7f;
10086                         regidx = (bit & 0x60) >> 5;
10087                         bit &= 0x1f;
10088                         mc_filter[regidx] |= (1 << bit);
10089                 }
10090
10091                 tw32(MAC_HASH_REG_0, mc_filter[0]);
10092                 tw32(MAC_HASH_REG_1, mc_filter[1]);
10093                 tw32(MAC_HASH_REG_2, mc_filter[2]);
10094                 tw32(MAC_HASH_REG_3, mc_filter[3]);
10095         }
10096
10097         if (rx_mode != tp->rx_mode) {
10098                 tp->rx_mode = rx_mode;
10099                 tw32_f(MAC_RX_MODE, rx_mode);
10100                 udelay(10);
10101         }
10102 }
10103
10104 static void tg3_set_rx_mode(struct net_device *dev)
10105 {
10106         struct tg3 *tp = netdev_priv(dev);
10107
10108         if (!netif_running(dev))
10109                 return;
10110
10111         tg3_full_lock(tp, 0);
10112         __tg3_set_rx_mode(dev);
10113         tg3_full_unlock(tp);
10114 }
10115
10116 static int tg3_get_regs_len(struct net_device *dev)
10117 {
10118         return TG3_REG_BLK_SIZE;
10119 }
10120
10121 static void tg3_get_regs(struct net_device *dev,
10122                 struct ethtool_regs *regs, void *_p)
10123 {
10124         struct tg3 *tp = netdev_priv(dev);
10125
10126         regs->version = 0;
10127
10128         memset(_p, 0, TG3_REG_BLK_SIZE);
10129
10130         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10131                 return;
10132
10133         tg3_full_lock(tp, 0);
10134
10135         tg3_dump_legacy_regs(tp, (u32 *)_p);
10136
10137         tg3_full_unlock(tp);
10138 }
10139
10140 static int tg3_get_eeprom_len(struct net_device *dev)
10141 {
10142         struct tg3 *tp = netdev_priv(dev);
10143
10144         return tp->nvram_size;
10145 }
10146
10147 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10148 {
10149         struct tg3 *tp = netdev_priv(dev);
10150         int ret;
10151         u8  *pd;
10152         u32 i, offset, len, b_offset, b_count;
10153         __be32 val;
10154
10155         if (tg3_flag(tp, NO_NVRAM))
10156                 return -EINVAL;
10157
10158         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10159                 return -EAGAIN;
10160
10161         offset = eeprom->offset;
10162         len = eeprom->len;
10163         eeprom->len = 0;
10164
10165         eeprom->magic = TG3_EEPROM_MAGIC;
10166
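        /* NVRAM is read in aligned 32-bit words, so the request is
         * split into three phases: a leading partial word, the aligned
         * middle, and a trailing partial word.  E.g. offset=5 len=9
         * reads the words at 4, 8 and 12 and copies 3 + 4 + 2 bytes.
         */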
10167         if (offset & 3) {
10168                 /* adjustments to start on required 4 byte boundary */
10169                 b_offset = offset & 3;
10170                 b_count = 4 - b_offset;
10171                 if (b_count > len) {
10172                         /* i.e. offset=1 len=2 */
10173                         b_count = len;
10174                 }
10175                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10176                 if (ret)
10177                         return ret;
10178                 memcpy(data, ((char *)&val) + b_offset, b_count);
10179                 len -= b_count;
10180                 offset += b_count;
10181                 eeprom->len += b_count;
10182         }
10183
10184         /* read bytes up to the last 4 byte boundary */
10185         pd = &data[eeprom->len];
10186         for (i = 0; i < (len - (len & 3)); i += 4) {
10187                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10188                 if (ret) {
10189                         eeprom->len += i;
10190                         return ret;
10191                 }
10192                 memcpy(pd + i, &val, 4);
10193         }
10194         eeprom->len += i;
10195
10196         if (len & 3) {
10197                 /* read last bytes not ending on 4 byte boundary */
10198                 pd = &data[eeprom->len];
10199                 b_count = len & 3;
10200                 b_offset = offset + len - b_count;
10201                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10202                 if (ret)
10203                         return ret;
10204                 memcpy(pd, &val, b_count);
10205                 eeprom->len += b_count;
10206         }
10207         return 0;
10208 }
10209
10210 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10211
10212 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10213 {
10214         struct tg3 *tp = netdev_priv(dev);
10215         int ret;
10216         u32 offset, len, b_offset, odd_len;
10217         u8 *buf;
10218         __be32 start, end;
10219
10220         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10221                 return -EAGAIN;
10222
10223         if (tg3_flag(tp, NO_NVRAM) ||
10224             eeprom->magic != TG3_EEPROM_MAGIC)
10225                 return -EINVAL;
10226
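        /* Writes must likewise cover whole aligned words, so unaligned
         * head and tail bytes are handled read-modify-write: the
         * bordering words are read into @start/@end and merged with the
         * caller's data in a bounce buffer before writing.
         */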
10227         offset = eeprom->offset;
10228         len = eeprom->len;
10229
10230         if ((b_offset = (offset & 3))) {
10231                 /* adjustments to start on required 4 byte boundary */
10232                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10233                 if (ret)
10234                         return ret;
10235                 len += b_offset;
10236                 offset &= ~3;
10237                 if (len < 4)
10238                         len = 4;
10239         }
10240
10241         odd_len = 0;
10242         if (len & 3) {
10243                 /* adjustments to end on required 4 byte boundary */
10244                 odd_len = 1;
10245                 len = (len + 3) & ~3;
10246                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10247                 if (ret)
10248                         return ret;
10249         }
10250
10251         buf = data;
10252         if (b_offset || odd_len) {
10253                 buf = kmalloc(len, GFP_KERNEL);
10254                 if (!buf)
10255                         return -ENOMEM;
10256                 if (b_offset)
10257                         memcpy(buf, &start, 4);
10258                 if (odd_len)
10259                         memcpy(buf+len-4, &end, 4);
10260                 memcpy(buf + b_offset, data, eeprom->len);
10261         }
10262
10263         ret = tg3_nvram_write_block(tp, offset, len, buf);
10264
10265         if (buf != data)
10266                 kfree(buf);
10267
10268         return ret;
10269 }
10270
10271 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10272 {
10273         struct tg3 *tp = netdev_priv(dev);
10274
10275         if (tg3_flag(tp, USE_PHYLIB)) {
10276                 struct phy_device *phydev;
10277                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10278                         return -EAGAIN;
10279                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10280                 return phy_ethtool_gset(phydev, cmd);
10281         }
10282
10283         cmd->supported = (SUPPORTED_Autoneg);
10284
10285         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10286                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10287                                    SUPPORTED_1000baseT_Full);
10288
10289         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10290                 cmd->supported |= (SUPPORTED_100baseT_Half |
10291                                   SUPPORTED_100baseT_Full |
10292                                   SUPPORTED_10baseT_Half |
10293                                   SUPPORTED_10baseT_Full |
10294                                   SUPPORTED_TP);
10295                 cmd->port = PORT_TP;
10296         } else {
10297                 cmd->supported |= SUPPORTED_FIBRE;
10298                 cmd->port = PORT_FIBRE;
10299         }
10300
10301         cmd->advertising = tp->link_config.advertising;
10302         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10303                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10304                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10305                                 cmd->advertising |= ADVERTISED_Pause;
10306                         } else {
10307                                 cmd->advertising |= ADVERTISED_Pause |
10308                                                     ADVERTISED_Asym_Pause;
10309                         }
10310                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10311                         cmd->advertising |= ADVERTISED_Asym_Pause;
10312                 }
10313         }
10314         if (netif_running(dev)) {
10315                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10316                 cmd->duplex = tp->link_config.active_duplex;
10317         } else {
10318                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10319                 cmd->duplex = DUPLEX_INVALID;
10320         }
10321         cmd->phy_address = tp->phy_addr;
10322         cmd->transceiver = XCVR_INTERNAL;
10323         cmd->autoneg = tp->link_config.autoneg;
10324         cmd->maxtxpkt = 0;
10325         cmd->maxrxpkt = 0;
10326         return 0;
10327 }
10328
10329 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10330 {
10331         struct tg3 *tp = netdev_priv(dev);
10332         u32 speed = ethtool_cmd_speed(cmd);
10333
10334         if (tg3_flag(tp, USE_PHYLIB)) {
10335                 struct phy_device *phydev;
10336                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10337                         return -EAGAIN;
10338                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10339                 return phy_ethtool_sset(phydev, cmd);
10340         }
10341
10342         if (cmd->autoneg != AUTONEG_ENABLE &&
10343             cmd->autoneg != AUTONEG_DISABLE)
10344                 return -EINVAL;
10345
10346         if (cmd->autoneg == AUTONEG_DISABLE &&
10347             cmd->duplex != DUPLEX_FULL &&
10348             cmd->duplex != DUPLEX_HALF)
10349                 return -EINVAL;
10350
10351         if (cmd->autoneg == AUTONEG_ENABLE) {
10352                 u32 mask = ADVERTISED_Autoneg |
10353                            ADVERTISED_Pause |
10354                            ADVERTISED_Asym_Pause;
10355
10356                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10357                         mask |= ADVERTISED_1000baseT_Half |
10358                                 ADVERTISED_1000baseT_Full;
10359
10360                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10361                         mask |= ADVERTISED_100baseT_Half |
10362                                 ADVERTISED_100baseT_Full |
10363                                 ADVERTISED_10baseT_Half |
10364                                 ADVERTISED_10baseT_Full |
10365                                 ADVERTISED_TP;
10366                 else
10367                         mask |= ADVERTISED_FIBRE;
10368
10369                 if (cmd->advertising & ~mask)
10370                         return -EINVAL;
10371
10372                 mask &= (ADVERTISED_1000baseT_Half |
10373                          ADVERTISED_1000baseT_Full |
10374                          ADVERTISED_100baseT_Half |
10375                          ADVERTISED_100baseT_Full |
10376                          ADVERTISED_10baseT_Half |
10377                          ADVERTISED_10baseT_Full);
10378
10379                 cmd->advertising &= mask;
10380         } else {
10381                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10382                         if (speed != SPEED_1000)
10383                                 return -EINVAL;
10384
10385                         if (cmd->duplex != DUPLEX_FULL)
10386                                 return -EINVAL;
10387                 } else {
10388                         if (speed != SPEED_100 &&
10389                             speed != SPEED_10)
10390                                 return -EINVAL;
10391                 }
10392         }
10393
10394         tg3_full_lock(tp, 0);
10395
10396         tp->link_config.autoneg = cmd->autoneg;
10397         if (cmd->autoneg == AUTONEG_ENABLE) {
10398                 tp->link_config.advertising = (cmd->advertising |
10399                                               ADVERTISED_Autoneg);
10400                 tp->link_config.speed = SPEED_INVALID;
10401                 tp->link_config.duplex = DUPLEX_INVALID;
10402         } else {
10403                 tp->link_config.advertising = 0;
10404                 tp->link_config.speed = speed;
10405                 tp->link_config.duplex = cmd->duplex;
10406         }
10407
10408         tp->link_config.orig_speed = tp->link_config.speed;
10409         tp->link_config.orig_duplex = tp->link_config.duplex;
10410         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10411
10412         if (netif_running(dev))
10413                 tg3_setup_phy(tp, 1);
10414
10415         tg3_full_unlock(tp);
10416
10417         return 0;
10418 }
10419
10420 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10421 {
10422         struct tg3 *tp = netdev_priv(dev);
10423
10424         strcpy(info->driver, DRV_MODULE_NAME);
10425         strcpy(info->version, DRV_MODULE_VERSION);
10426         strcpy(info->fw_version, tp->fw_ver);
10427         strcpy(info->bus_info, pci_name(tp->pdev));
10428 }
10429
10430 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10431 {
10432         struct tg3 *tp = netdev_priv(dev);
10433
10434         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10435                 wol->supported = WAKE_MAGIC;
10436         else
10437                 wol->supported = 0;
10438         wol->wolopts = 0;
10439         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10440                 wol->wolopts = WAKE_MAGIC;
10441         memset(&wol->sopass, 0, sizeof(wol->sopass));
10442 }
10443
10444 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10445 {
10446         struct tg3 *tp = netdev_priv(dev);
10447         struct device *dp = &tp->pdev->dev;
10448
10449         if (wol->wolopts & ~WAKE_MAGIC)
10450                 return -EINVAL;
10451         if ((wol->wolopts & WAKE_MAGIC) &&
10452             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10453                 return -EINVAL;
10454
10455         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10456
10457         spin_lock_bh(&tp->lock);
10458         if (device_may_wakeup(dp))
10459                 tg3_flag_set(tp, WOL_ENABLE);
10460         else
10461                 tg3_flag_clear(tp, WOL_ENABLE);
10462         spin_unlock_bh(&tp->lock);
10463
10464         return 0;
10465 }
10466
10467 static u32 tg3_get_msglevel(struct net_device *dev)
10468 {
10469         struct tg3 *tp = netdev_priv(dev);
10470         return tp->msg_enable;
10471 }
10472
10473 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10474 {
10475         struct tg3 *tp = netdev_priv(dev);
10476         tp->msg_enable = value;
10477 }
10478
10479 static int tg3_nway_reset(struct net_device *dev)
10480 {
10481         struct tg3 *tp = netdev_priv(dev);
10482         int r;
10483
10484         if (!netif_running(dev))
10485                 return -EAGAIN;
10486
10487         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10488                 return -EINVAL;
10489
10490         if (tg3_flag(tp, USE_PHYLIB)) {
10491                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10492                         return -EAGAIN;
10493                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10494         } else {
10495                 u32 bmcr;
10496
10497                 spin_lock_bh(&tp->lock);
10498                 r = -EINVAL;
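                /* BMCR is deliberately read twice; the first read
                 * appears to be a dummy that flushes any stale value
                 * before the result we actually test.
                 */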
10499                 tg3_readphy(tp, MII_BMCR, &bmcr);
10500                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10501                     ((bmcr & BMCR_ANENABLE) ||
10502                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10503                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10504                                                    BMCR_ANENABLE);
10505                         r = 0;
10506                 }
10507                 spin_unlock_bh(&tp->lock);
10508         }
10509
10510         return r;
10511 }
10512
10513 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10514 {
10515         struct tg3 *tp = netdev_priv(dev);
10516
10517         ering->rx_max_pending = tp->rx_std_ring_mask;
10518         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10519                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10520         else
10521                 ering->rx_jumbo_max_pending = 0;
10522
10523         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10524
10525         ering->rx_pending = tp->rx_pending;
10526         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10527                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10528         else
10529                 ering->rx_jumbo_pending = 0;
10530
10531         ering->tx_pending = tp->napi[0].tx_pending;
10532 }
10533
10534 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10535 {
10536         struct tg3 *tp = netdev_priv(dev);
10537         int i, irq_sync = 0, err = 0;
10538
10539         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10540             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10541             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10542             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10543             (tg3_flag(tp, TSO_BUG) &&
10544              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10545                 return -EINVAL;
10546
10547         if (netif_running(dev)) {
10548                 tg3_phy_stop(tp);
10549                 tg3_netif_stop(tp);
10550                 irq_sync = 1;
10551         }
10552
10553         tg3_full_lock(tp, irq_sync);
10554
10555         tp->rx_pending = ering->rx_pending;
10556
10557         if (tg3_flag(tp, MAX_RXPEND_64) &&
10558             tp->rx_pending > 63)
10559                 tp->rx_pending = 63;
10560         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10561
10562         for (i = 0; i < tp->irq_max; i++)
10563                 tp->napi[i].tx_pending = ering->tx_pending;
10564
10565         if (netif_running(dev)) {
10566                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10567                 err = tg3_restart_hw(tp, 1);
10568                 if (!err)
10569                         tg3_netif_start(tp);
10570         }
10571
10572         tg3_full_unlock(tp);
10573
10574         if (irq_sync && !err)
10575                 tg3_phy_start(tp);
10576
10577         return err;
10578 }
10579
10580 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10581 {
10582         struct tg3 *tp = netdev_priv(dev);
10583
10584         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10585
10586         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10587                 epause->rx_pause = 1;
10588         else
10589                 epause->rx_pause = 0;
10590
10591         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10592                 epause->tx_pause = 1;
10593         else
10594                 epause->tx_pause = 0;
10595 }
10596
10597 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10598 {
10599         struct tg3 *tp = netdev_priv(dev);
10600         int err = 0;
10601
10602         if (tg3_flag(tp, USE_PHYLIB)) {
10603                 u32 newadv;
10604                 struct phy_device *phydev;
10605
10606                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10607
10608                 if (!(phydev->supported & SUPPORTED_Pause) ||
10609                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10610                      (epause->rx_pause != epause->tx_pause)))
10611                         return -EINVAL;
10612
10613                 tp->link_config.flowctrl = 0;
10614                 if (epause->rx_pause) {
10615                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10616
10617                         if (epause->tx_pause) {
10618                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10619                                 newadv = ADVERTISED_Pause;
10620                         } else
10621                                 newadv = ADVERTISED_Pause |
10622                                          ADVERTISED_Asym_Pause;
10623                 } else if (epause->tx_pause) {
10624                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10625                         newadv = ADVERTISED_Asym_Pause;
10626                 } else
10627                         newadv = 0;
10628
10629                 if (epause->autoneg)
10630                         tg3_flag_set(tp, PAUSE_AUTONEG);
10631                 else
10632                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10633
10634                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10635                         u32 oldadv = phydev->advertising &
10636                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10637                         if (oldadv != newadv) {
10638                                 phydev->advertising &=
10639                                         ~(ADVERTISED_Pause |
10640                                           ADVERTISED_Asym_Pause);
10641                                 phydev->advertising |= newadv;
10642                                 if (phydev->autoneg) {
10643                                         /*
10644                                          * Always renegotiate the link to
10645                                          * inform our link partner of our
10646                                          * flow control settings, even if the
10647                                          * flow control is forced.  Let
10648                                          * tg3_adjust_link() do the final
10649                                          * flow control setup.
10650                                          */
10651                                         return phy_start_aneg(phydev);
10652                                 }
10653                         }
10654
10655                         if (!epause->autoneg)
10656                                 tg3_setup_flow_control(tp, 0, 0);
10657                 } else {
10658                         tp->link_config.orig_advertising &=
10659                                         ~(ADVERTISED_Pause |
10660                                           ADVERTISED_Asym_Pause);
10661                         tp->link_config.orig_advertising |= newadv;
10662                 }
10663         } else {
10664                 int irq_sync = 0;
10665
10666                 if (netif_running(dev)) {
10667                         tg3_netif_stop(tp);
10668                         irq_sync = 1;
10669                 }
10670
10671                 tg3_full_lock(tp, irq_sync);
10672
10673                 if (epause->autoneg)
10674                         tg3_flag_set(tp, PAUSE_AUTONEG);
10675                 else
10676                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10677                 if (epause->rx_pause)
10678                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10679                 else
10680                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10681                 if (epause->tx_pause)
10682                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10683                 else
10684                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10685
10686                 if (netif_running(dev)) {
10687                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10688                         err = tg3_restart_hw(tp, 1);
10689                         if (!err)
10690                                 tg3_netif_start(tp);
10691                 }
10692
10693                 tg3_full_unlock(tp);
10694         }
10695
10696         return err;
10697 }
10698
10699 static int tg3_get_sset_count(struct net_device *dev, int sset)
10700 {
10701         switch (sset) {
10702         case ETH_SS_TEST:
10703                 return TG3_NUM_TEST;
10704         case ETH_SS_STATS:
10705                 return TG3_NUM_STATS;
10706         default:
10707                 return -EOPNOTSUPP;
10708         }
10709 }
10710
10711 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10712 {
10713         switch (stringset) {
10714         case ETH_SS_STATS:
10715                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10716                 break;
10717         case ETH_SS_TEST:
10718                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10719                 break;
10720         default:
10721                 WARN_ON(1);     /* we need a WARN() */
10722                 break;
10723         }
10724 }
10725
10726 static int tg3_set_phys_id(struct net_device *dev,
10727                             enum ethtool_phys_id_state state)
10728 {
10729         struct tg3 *tp = netdev_priv(dev);
10730
10731         if (!netif_running(tp->dev))
10732                 return -EAGAIN;
10733
10734         switch (state) {
10735         case ETHTOOL_ID_ACTIVE:
10736                 return 1;       /* cycle on/off once per second */
10737
10738         case ETHTOOL_ID_ON:
10739                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10740                      LED_CTRL_1000MBPS_ON |
10741                      LED_CTRL_100MBPS_ON |
10742                      LED_CTRL_10MBPS_ON |
10743                      LED_CTRL_TRAFFIC_OVERRIDE |
10744                      LED_CTRL_TRAFFIC_BLINK |
10745                      LED_CTRL_TRAFFIC_LED);
10746                 break;
10747
10748         case ETHTOOL_ID_OFF:
10749                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10750                      LED_CTRL_TRAFFIC_OVERRIDE);
10751                 break;
10752
10753         case ETHTOOL_ID_INACTIVE:
10754                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10755                 break;
10756         }
10757
10758         return 0;
10759 }
10760
10761 static void tg3_get_ethtool_stats(struct net_device *dev,
10762                                    struct ethtool_stats *estats, u64 *tmp_stats)
10763 {
10764         struct tg3 *tp = netdev_priv(dev);
10765         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10766 }
10767
10768 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10769 {
10770         int i;
10771         __be32 *buf;
10772         u32 offset = 0, len = 0;
10773         u32 magic, val;
10774
10775         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10776                 return NULL;
10777
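        /* On real NVRAM parts, scan the NVM directory for an extended
         * VPD entry; if none is found, fall back to the legacy fixed
         * VPD offset and length below.
         */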
10778         if (magic == TG3_EEPROM_MAGIC) {
10779                 for (offset = TG3_NVM_DIR_START;
10780                      offset < TG3_NVM_DIR_END;
10781                      offset += TG3_NVM_DIRENT_SIZE) {
10782                         if (tg3_nvram_read(tp, offset, &val))
10783                                 return NULL;
10784
10785                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10786                             TG3_NVM_DIRTYPE_EXTVPD)
10787                                 break;
10788                 }
10789
10790                 if (offset != TG3_NVM_DIR_END) {
10791                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10792                         if (tg3_nvram_read(tp, offset + 4, &offset))
10793                                 return NULL;
10794
10795                         offset = tg3_nvram_logical_addr(tp, offset);
10796                 }
10797         }
10798
10799         if (!offset || !len) {
10800                 offset = TG3_NVM_VPD_OFF;
10801                 len = TG3_NVM_VPD_LEN;
10802         }
10803
10804         buf = kmalloc(len, GFP_KERNEL);
10805         if (buf == NULL)
10806                 return NULL;
10807
10808         if (magic == TG3_EEPROM_MAGIC) {
10809                 for (i = 0; i < len; i += 4) {
10810                         /* VPD data is little-endian in NVRAM; the
10811                          * big-endian read routine returns the bytes
10812                          * exactly as stored, preserving that layout.
10813                          */
10814                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10815                                 goto error;
10816                 }
10817         } else {
10818                 u8 *ptr;
10819                 ssize_t cnt;
10820                 unsigned int pos = 0;
10821
10822                 ptr = (u8 *)&buf[0];
10823                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10824                         cnt = pci_read_vpd(tp->pdev, pos,
10825                                            len - pos, ptr);
10826                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10827                                 cnt = 0;
10828                         else if (cnt < 0)
10829                                 goto error;
10830                 }
10831                 if (pos != len)
10832                         goto error;
10833         }
10834
10835         *vpdlen = len;
10836
10837         return buf;
10838
10839 error:
10840         kfree(buf);
10841         return NULL;
10842 }
10843
10844 #define NVRAM_TEST_SIZE 0x100
10845 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10846 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10847 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10848 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10849 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10850 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10851 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10852 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10853
10854 static int tg3_test_nvram(struct tg3 *tp)
10855 {
10856         u32 csum, magic, len;
10857         __be32 *buf;
10858         int i, j, k, err = 0, size;
10859
10860         if (tg3_flag(tp, NO_NVRAM))
10861                 return 0;
10862
10863         if (tg3_nvram_read(tp, 0, &magic) != 0)
10864                 return -EIO;
10865
10866         if (magic == TG3_EEPROM_MAGIC)
10867                 size = NVRAM_TEST_SIZE;
10868         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10869                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10870                     TG3_EEPROM_SB_FORMAT_1) {
10871                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10872                         case TG3_EEPROM_SB_REVISION_0:
10873                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10874                                 break;
10875                         case TG3_EEPROM_SB_REVISION_2:
10876                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10877                                 break;
10878                         case TG3_EEPROM_SB_REVISION_3:
10879                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10880                                 break;
10881                         case TG3_EEPROM_SB_REVISION_4:
10882                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10883                                 break;
10884                         case TG3_EEPROM_SB_REVISION_5:
10885                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10886                                 break;
10887                         case TG3_EEPROM_SB_REVISION_6:
10888                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10889                                 break;
10890                         default:
10891                                 return -EIO;
10892                         }
10893                 } else
10894                         return 0;
10895         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10896                 size = NVRAM_SELFBOOT_HW_SIZE;
10897         else
10898                 return -EIO;
10899
10900         buf = kmalloc(size, GFP_KERNEL);
10901         if (buf == NULL)
10902                 return -ENOMEM;
10903
10904         err = -EIO;
10905         for (i = 0, j = 0; i < size; i += 4, j++) {
10906                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10907                 if (err)
10908                         break;
10909         }
10910         if (i < size)
10911                 goto out;
10912
10913         /* Selfboot format */
10914         magic = be32_to_cpu(buf[0]);
10915         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10916             TG3_EEPROM_MAGIC_FW) {
10917                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10918
10919                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10920                     TG3_EEPROM_SB_REVISION_2) {
10921                         /* For rev 2, the csum doesn't include the MBA. */
10922                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10923                                 csum8 += buf8[i];
10924                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10925                                 csum8 += buf8[i];
10926                 } else {
10927                         for (i = 0; i < size; i++)
10928                                 csum8 += buf8[i];
10929                 }
10930
10931                 if (csum8 == 0) {
10932                         err = 0;
10933                         goto out;
10934                 }
10935
10936                 err = -EIO;
10937                 goto out;
10938         }
10939
10940         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10941             TG3_EEPROM_MAGIC_HW) {
10942                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10943                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10944                 u8 *buf8 = (u8 *) buf;
10945
10946                 /* Separate the parity bits and the data bytes.  */
10947                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10948                         if ((i == 0) || (i == 8)) {
10949                                 int l;
10950                                 u8 msk;
10951
10952                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10953                                         parity[k++] = buf8[i] & msk;
10954                                 i++;
10955                         } else if (i == 16) {
10956                                 int l;
10957                                 u8 msk;
10958
10959                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10960                                         parity[k++] = buf8[i] & msk;
10961                                 i++;
10962
10963                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10964                                         parity[k++] = buf8[i] & msk;
10965                                 i++;
10966                         }
10967                         data[j++] = buf8[i];
10968                 }
10969
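                /* Odd-parity check: each stored parity bit must bring
                 * the total count of set bits (data byte plus parity
                 * bit) to an odd number; hweight8() gives the data
                 * byte's population count.
                 */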
10970                 err = -EIO;
10971                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10972                         u8 hw8 = hweight8(data[i]);
10973
10974                         if ((hw8 & 0x1) && parity[i])
10975                                 goto out;
10976                         else if (!(hw8 & 0x1) && !parity[i])
10977                                 goto out;
10978                 }
10979                 err = 0;
10980                 goto out;
10981         }
10982
10983         err = -EIO;
10984
10985         /* Bootstrap checksum at offset 0x10 */
10986         csum = calc_crc((unsigned char *) buf, 0x10);
10987         if (csum != le32_to_cpu(buf[0x10/4]))
10988                 goto out;
10989
10990         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10991         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10992         if (csum != le32_to_cpu(buf[0xfc/4]))
10993                 goto out;
10994
10995         kfree(buf);
10996
10997         buf = tg3_vpd_readblock(tp, &len);
10998         if (!buf)
10999                 return -ENOMEM;
11000
11001         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11002         if (i > 0) {
11003                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11004                 if (j < 0)
11005                         goto out;
11006
11007                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11008                         goto out;
11009
11010                 i += PCI_VPD_LRDT_TAG_SIZE;
11011                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11012                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11013                 if (j > 0) {
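                        /* Per the PCI VPD spec, the RV keyword's
                         * checksum byte makes every byte from the start
                         * of the VPD block up to and including itself
                         * sum to zero (mod 256).
                         */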
11014                         u8 csum8 = 0;
11015
11016                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11017
11018                         for (i = 0; i <= j; i++)
11019                                 csum8 += ((u8 *)buf)[i];
11020
11021                         if (csum8)
11022                                 goto out;
11023                 }
11024         }
11025
11026         err = 0;
11027
11028 out:
11029         kfree(buf);
11030         return err;
11031 }
11032
11033 #define TG3_SERDES_TIMEOUT_SEC  2
11034 #define TG3_COPPER_TIMEOUT_SEC  6
11035
11036 static int tg3_test_link(struct tg3 *tp)
11037 {
11038         int i, max;
11039
11040         if (!netif_running(tp->dev))
11041                 return -ENODEV;
11042
11043         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11044                 max = TG3_SERDES_TIMEOUT_SEC;
11045         else
11046                 max = TG3_COPPER_TIMEOUT_SEC;
11047
11048         for (i = 0; i < max; i++) {
11049                 if (netif_carrier_ok(tp->dev))
11050                         return 0;
11051
11052                 if (msleep_interruptible(1000))
11053                         break;
11054         }
11055
11056         return -EIO;
11057 }
11058
11059 /* Only test the commonly used registers */
11060 static int tg3_test_registers(struct tg3 *tp)
11061 {
11062         int i, is_5705, is_5750;
11063         u32 offset, read_mask, write_mask, val, save_val, read_val;
11064         static struct {
11065                 u16 offset;
11066                 u16 flags;
11067 #define TG3_FL_5705     0x1
11068 #define TG3_FL_NOT_5705 0x2
11069 #define TG3_FL_NOT_5788 0x4
11070 #define TG3_FL_NOT_5750 0x8
11071                 u32 read_mask;
11072                 u32 write_mask;
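                /* read_mask marks the bits compared on read-back,
                 * write_mask the bits the test may safely toggle;
                 * entries whose flags exclude the chip under test are
                 * skipped.
                 */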
11073         } reg_tbl[] = {
11074                 /* MAC Control Registers */
11075                 { MAC_MODE, TG3_FL_NOT_5705,
11076                         0x00000000, 0x00ef6f8c },
11077                 { MAC_MODE, TG3_FL_5705,
11078                         0x00000000, 0x01ef6b8c },
11079                 { MAC_STATUS, TG3_FL_NOT_5705,
11080                         0x03800107, 0x00000000 },
11081                 { MAC_STATUS, TG3_FL_5705,
11082                         0x03800100, 0x00000000 },
11083                 { MAC_ADDR_0_HIGH, 0x0000,
11084                         0x00000000, 0x0000ffff },
11085                 { MAC_ADDR_0_LOW, 0x0000,
11086                         0x00000000, 0xffffffff },
11087                 { MAC_RX_MTU_SIZE, 0x0000,
11088                         0x00000000, 0x0000ffff },
11089                 { MAC_TX_MODE, 0x0000,
11090                         0x00000000, 0x00000070 },
11091                 { MAC_TX_LENGTHS, 0x0000,
11092                         0x00000000, 0x00003fff },
11093                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11094                         0x00000000, 0x000007fc },
11095                 { MAC_RX_MODE, TG3_FL_5705,
11096                         0x00000000, 0x000007dc },
11097                 { MAC_HASH_REG_0, 0x0000,
11098                         0x00000000, 0xffffffff },
11099                 { MAC_HASH_REG_1, 0x0000,
11100                         0x00000000, 0xffffffff },
11101                 { MAC_HASH_REG_2, 0x0000,
11102                         0x00000000, 0xffffffff },
11103                 { MAC_HASH_REG_3, 0x0000,
11104                         0x00000000, 0xffffffff },
11105
11106                 /* Receive Data and Receive BD Initiator Control Registers. */
11107                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11108                         0x00000000, 0xffffffff },
11109                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11110                         0x00000000, 0xffffffff },
11111                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11112                         0x00000000, 0x00000003 },
11113                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11114                         0x00000000, 0xffffffff },
11115                 { RCVDBDI_STD_BD+0, 0x0000,
11116                         0x00000000, 0xffffffff },
11117                 { RCVDBDI_STD_BD+4, 0x0000,
11118                         0x00000000, 0xffffffff },
11119                 { RCVDBDI_STD_BD+8, 0x0000,
11120                         0x00000000, 0xffff0002 },
11121                 { RCVDBDI_STD_BD+0xc, 0x0000,
11122                         0x00000000, 0xffffffff },
11123
11124                 /* Receive BD Initiator Control Registers. */
11125                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11126                         0x00000000, 0xffffffff },
11127                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11128                         0x00000000, 0x000003ff },
11129                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11130                         0x00000000, 0xffffffff },
11131
11132                 /* Host Coalescing Control Registers. */
11133                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11134                         0x00000000, 0x00000004 },
11135                 { HOSTCC_MODE, TG3_FL_5705,
11136                         0x00000000, 0x000000f6 },
11137                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11138                         0x00000000, 0xffffffff },
11139                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11140                         0x00000000, 0x000003ff },
11141                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11142                         0x00000000, 0xffffffff },
11143                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11144                         0x00000000, 0x000003ff },
11145                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11146                         0x00000000, 0xffffffff },
11147                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11148                         0x00000000, 0x000000ff },
11149                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11150                         0x00000000, 0xffffffff },
11151                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11152                         0x00000000, 0x000000ff },
11153                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11154                         0x00000000, 0xffffffff },
11155                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11156                         0x00000000, 0xffffffff },
11157                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11158                         0x00000000, 0xffffffff },
11159                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11160                         0x00000000, 0x000000ff },
11161                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11162                         0x00000000, 0xffffffff },
11163                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11164                         0x00000000, 0x000000ff },
11165                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11166                         0x00000000, 0xffffffff },
11167                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11168                         0x00000000, 0xffffffff },
11169                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11170                         0x00000000, 0xffffffff },
11171                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11172                         0x00000000, 0xffffffff },
11173                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11174                         0x00000000, 0xffffffff },
11175                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11176                         0xffffffff, 0x00000000 },
11177                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11178                         0xffffffff, 0x00000000 },
11179
11180                 /* Buffer Manager Control Registers. */
11181                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11182                         0x00000000, 0x007fff80 },
11183                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11184                         0x00000000, 0x007fffff },
11185                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11186                         0x00000000, 0x0000003f },
11187                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11188                         0x00000000, 0x000001ff },
11189                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11190                         0x00000000, 0x000001ff },
11191                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11192                         0xffffffff, 0x00000000 },
11193                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11194                         0xffffffff, 0x00000000 },
11195
11196                 /* Mailbox Registers */
11197                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11198                         0x00000000, 0x000001ff },
11199                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11200                         0x00000000, 0x000001ff },
11201                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11202                         0x00000000, 0x000007ff },
11203                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11204                         0x00000000, 0x000001ff },
11205
11206                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11207         };
11208
11209         is_5705 = is_5750 = 0;
11210         if (tg3_flag(tp, 5705_PLUS)) {
11211                 is_5705 = 1;
11212                 if (tg3_flag(tp, 5750_PLUS))
11213                         is_5750 = 1;
11214         }
11215
11216         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11217                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11218                         continue;
11219
11220                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11221                         continue;
11222
11223                 if (tg3_flag(tp, IS_5788) &&
11224                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11225                         continue;
11226
11227                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11228                         continue;
11229
11230                 offset = (u32) reg_tbl[i].offset;
11231                 read_mask = reg_tbl[i].read_mask;
11232                 write_mask = reg_tbl[i].write_mask;
11233
11234                 /* Save the original register content */
11235                 save_val = tr32(offset);
11236
11237                 /* Determine the read-only value. */
11238                 read_val = save_val & read_mask;
11239
11240                 /* Write zero to the register, then make sure the read-only bits
11241                  * are not changed and the read/write bits are all zeros.
11242                  */
11243                 tw32(offset, 0);
11244
11245                 val = tr32(offset);
11246
11247                 /* Test the read-only and read/write bits. */
11248                 if (((val & read_mask) != read_val) || (val & write_mask))
11249                         goto out;
11250
11251                 /* Write ones to all the bits defined by read_mask and
11252                  * write_mask, then make sure the read-only bits are not
11253                  * changed and the read/write bits are all ones.
11254                  */
11255                 tw32(offset, read_mask | write_mask);
11256
11257                 val = tr32(offset);
11258
11259                 /* Test the read-only bits. */
11260                 if ((val & read_mask) != read_val)
11261                         goto out;
11262
11263                 /* Test the read/write bits. */
11264                 if ((val & write_mask) != write_mask)
11265                         goto out;
11266
11267                 tw32(offset, save_val);
11268         }
11269
11270         return 0;
11271
11272 out:
11273         if (netif_msg_hw(tp))
11274                 netdev_err(tp->dev,
11275                            "Register test failed at offset %x\n", offset);
11276         tw32(offset, save_val);
11277         return -EIO;
11278 }
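/*
 * Editor's note: the register test above probes each table entry twice --
 * once with all-zeros, once with all-ones over (read_mask | write_mask) --
 * and checks that read-only bits never change while read/write bits take
 * exactly what was written.  A minimal standalone sketch of the same idea,
 * assuming hypothetical reg_read()/reg_write() accessors (illustration
 * only, not driver code):
 */
#if 0
static int mask_probe(u32 off, u32 read_mask, u32 write_mask)
{
        u32 saved = reg_read(off);
        u32 ro = saved & read_mask;     /* expected read-only value */
        u32 val;
        int bad;

        reg_write(off, 0);
        val = reg_read(off);
        bad = ((val & read_mask) != ro) ||      /* RO bits must survive */
              (val & write_mask);               /* R/W bits must read 0 */

        reg_write(off, read_mask | write_mask);
        val = reg_read(off);
        bad |= ((val & read_mask) != ro) ||
               ((val & write_mask) != write_mask);

        reg_write(off, saved);          /* always restore the register */
        return bad ? -EIO : 0;
}
#endif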
11279
11280 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11281 {
11282         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11283         int i;
11284         u32 j;
11285
11286         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11287                 for (j = 0; j < len; j += 4) {
11288                         u32 val;
11289
11290                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11291                         tg3_read_mem(tp, offset + j, &val);
11292                         if (val != test_pattern[i])
11293                                 return -EIO;
11294                 }
11295         }
11296         return 0;
11297 }
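/*
 * Editor's note: the three patterns above are a common quick memory
 * check -- all-zeros and all-ones catch stuck-at bits, while 0xaa55a55a
 * alternates adjacent bits and nibbles to catch shorted or coupled
 * lines.  It is a simple write-then-read-back sweep, not an exhaustive
 * (e.g. march-style) memory test.
 */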
11298
11299 static int tg3_test_memory(struct tg3 *tp)
11300 {
11301         static struct mem_entry {
11302                 u32 offset;
11303                 u32 len;
11304         } mem_tbl_570x[] = {
11305                 { 0x00000000, 0x00b50},
11306                 { 0x00002000, 0x1c000},
11307                 { 0xffffffff, 0x00000}
11308         }, mem_tbl_5705[] = {
11309                 { 0x00000100, 0x0000c},
11310                 { 0x00000200, 0x00008},
11311                 { 0x00004000, 0x00800},
11312                 { 0x00006000, 0x01000},
11313                 { 0x00008000, 0x02000},
11314                 { 0x00010000, 0x0e000},
11315                 { 0xffffffff, 0x00000}
11316         }, mem_tbl_5755[] = {
11317                 { 0x00000200, 0x00008},
11318                 { 0x00004000, 0x00800},
11319                 { 0x00006000, 0x00800},
11320                 { 0x00008000, 0x02000},
11321                 { 0x00010000, 0x0c000},
11322                 { 0xffffffff, 0x00000}
11323         }, mem_tbl_5906[] = {
11324                 { 0x00000200, 0x00008},
11325                 { 0x00004000, 0x00400},
11326                 { 0x00006000, 0x00400},
11327                 { 0x00008000, 0x01000},
11328                 { 0x00010000, 0x01000},
11329                 { 0xffffffff, 0x00000}
11330         }, mem_tbl_5717[] = {
11331                 { 0x00000200, 0x00008},
11332                 { 0x00010000, 0x0a000},
11333                 { 0x00020000, 0x13c00},
11334                 { 0xffffffff, 0x00000}
11335         }, mem_tbl_57765[] = {
11336                 { 0x00000200, 0x00008},
11337                 { 0x00004000, 0x00800},
11338                 { 0x00006000, 0x09800},
11339                 { 0x00010000, 0x0a000},
11340                 { 0xffffffff, 0x00000}
11341         };
11342         struct mem_entry *mem_tbl;
11343         int err = 0;
11344         int i;
11345
11346         if (tg3_flag(tp, 5717_PLUS))
11347                 mem_tbl = mem_tbl_5717;
11348         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11349                 mem_tbl = mem_tbl_57765;
11350         else if (tg3_flag(tp, 5755_PLUS))
11351                 mem_tbl = mem_tbl_5755;
11352         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11353                 mem_tbl = mem_tbl_5906;
11354         else if (tg3_flag(tp, 5705_PLUS))
11355                 mem_tbl = mem_tbl_5705;
11356         else
11357                 mem_tbl = mem_tbl_570x;
11358
11359         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11360                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11361                 if (err)
11362                         break;
11363         }
11364
11365         return err;
11366 }
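/*
 * Editor's note: the chip checks above must run from newest/most
 * specific to oldest -- the capability flags are cumulative (a 5717
 * also has 5755_PLUS and 5705_PLUS set), so testing 5717_PLUS first is
 * what selects the right table.  The 0xffffffff offset entry acts as
 * each table's end-of-list sentinel.
 */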
11367
11368 #define TG3_TSO_MSS             500
11369
11370 #define TG3_TSO_IP_HDR_LEN      20
11371 #define TG3_TSO_TCP_HDR_LEN     20
11372 #define TG3_TSO_TCP_OPT_LEN     12
11373
11374 static const u8 tg3_tso_header[] = {
11375 0x08, 0x00,
11376 0x45, 0x00, 0x00, 0x00,
11377 0x00, 0x00, 0x40, 0x00,
11378 0x40, 0x06, 0x00, 0x00,
11379 0x0a, 0x00, 0x00, 0x01,
11380 0x0a, 0x00, 0x00, 0x02,
11381 0x0d, 0x00, 0xe0, 0x00,
11382 0x00, 0x00, 0x01, 0x00,
11383 0x00, 0x00, 0x02, 0x00,
11384 0x80, 0x10, 0x10, 0x00,
11385 0x14, 0x09, 0x00, 0x00,
11386 0x01, 0x01, 0x08, 0x0a,
11387 0x11, 0x11, 0x11, 0x11,
11388 0x11, 0x11, 0x11, 0x11,
11389 };
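/*
 * Editor's note: decoded for reference, the template above is an
 * Ethernet type field (0x0800, IPv4) followed by a 20-byte IPv4 header
 * (IHL 5, DF set, TTL 64, protocol TCP, saddr 10.0.0.1, daddr 10.0.0.2;
 * tot_len and the IP checksum left zero to be filled in later) and a
 * 32-byte TCP header (data offset 8, ACK set, window 0x1000) whose 12
 * option bytes are NOP, NOP, then a timestamp option (kind 8, length
 * 10) carrying dummy 0x11 data -- matching TG3_TSO_TCP_OPT_LEN.
 */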
11390
11391 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11392 {
11393         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11394         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11395         u32 budget;
11396         struct sk_buff *skb, *rx_skb;
11397         u8 *tx_data;
11398         dma_addr_t map;
11399         int num_pkts, tx_len, rx_len, i, err;
11400         struct tg3_rx_buffer_desc *desc;
11401         struct tg3_napi *tnapi, *rnapi;
11402         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11403
11404         tnapi = &tp->napi[0];
11405         rnapi = &tp->napi[0];
11406         if (tp->irq_cnt > 1) {
11407                 if (tg3_flag(tp, ENABLE_RSS))
11408                         rnapi = &tp->napi[1];
11409                 if (tg3_flag(tp, ENABLE_TSS))
11410                         tnapi = &tp->napi[1];
11411         }
11412         coal_now = tnapi->coal_now | rnapi->coal_now;
11413
11414         err = -EIO;
11415
11416         tx_len = pktsz;
11417         skb = netdev_alloc_skb(tp->dev, tx_len);
11418         if (!skb)
11419                 return -ENOMEM;
11420
11421         tx_data = skb_put(skb, tx_len);
11422         memcpy(tx_data, tp->dev->dev_addr, 6);
11423         memset(tx_data + 6, 0x0, 8);
11424
11425         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11426
11427         if (tso_loopback) {
11428                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11429
11430                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11431                               TG3_TSO_TCP_OPT_LEN;
11432
11433                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11434                        sizeof(tg3_tso_header));
11435                 mss = TG3_TSO_MSS;
11436
11437                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11438                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11439
11440                 /* Set the total length field in the IP header */
11441                 iph->tot_len = htons((u16)(mss + hdr_len));
11442
11443                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11444                               TXD_FLAG_CPU_POST_DMA);
11445
11446                 if (tg3_flag(tp, HW_TSO_1) ||
11447                     tg3_flag(tp, HW_TSO_2) ||
11448                     tg3_flag(tp, HW_TSO_3)) {
11449                         struct tcphdr *th;
11450                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11451                         th = (struct tcphdr *)&tx_data[val];
11452                         th->check = 0;
11453                 } else
11454                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11455
11456                 if (tg3_flag(tp, HW_TSO_3)) {
11457                         mss |= (hdr_len & 0xc) << 12;
11458                         if (hdr_len & 0x10)
11459                                 base_flags |= 0x00000010;
11460                         base_flags |= (hdr_len & 0x3e0) << 5;
11461                 } else if (tg3_flag(tp, HW_TSO_2))
11462                         mss |= hdr_len << 9;
11463                 else if (tg3_flag(tp, HW_TSO_1) ||
11464                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11465                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11466                 } else {
11467                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11468                 }
11469
11470                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11471         } else {
11472                 num_pkts = 1;
11473                 data_off = ETH_HLEN;
11474         }
11475
11476         for (i = data_off; i < tx_len; i++)
11477                 tx_data[i] = (u8) (i & 0xff);
11478
11479         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11480         if (pci_dma_mapping_error(tp->pdev, map)) {
11481                 dev_kfree_skb(skb);
11482                 return -EIO;
11483         }
11484
11485         val = tnapi->tx_prod;
11486         tnapi->tx_buffers[val].skb = skb;
11487         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11488
11489         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11490                rnapi->coal_now);
11491
11492         udelay(10);
11493
11494         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11495
11496         budget = tg3_tx_avail(tnapi);
11497         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11498                             base_flags | TXD_FLAG_END, mss, 0)) {
11499                 tnapi->tx_buffers[val].skb = NULL;
11500                 dev_kfree_skb(skb);
11501                 return -EIO;
11502         }
11503
11504         tnapi->tx_prod++;
11505
11506         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11507         tr32_mailbox(tnapi->prodmbox);
11508
11509         udelay(10);
11510
11511         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11512         for (i = 0; i < 35; i++) {
11513                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11514                        coal_now);
11515
11516                 udelay(10);
11517
11518                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11519                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11520                 if ((tx_idx == tnapi->tx_prod) &&
11521                     (rx_idx == (rx_start_idx + num_pkts)))
11522                         break;
11523         }
11524
11525         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11526         dev_kfree_skb(skb);
11527
11528         if (tx_idx != tnapi->tx_prod)
11529                 goto out;
11530
11531         if (rx_idx != rx_start_idx + num_pkts)
11532                 goto out;
11533
11534         val = data_off;
11535         while (rx_idx != rx_start_idx) {
11536                 desc = &rnapi->rx_rcb[rx_start_idx++];
11537                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11538                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11539
11540                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11541                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11542                         goto out;
11543
11544                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11545                          - ETH_FCS_LEN;
11546
11547                 if (!tso_loopback) {
11548                         if (rx_len != tx_len)
11549                                 goto out;
11550
11551                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11552                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11553                                         goto out;
11554                         } else {
11555                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11556                                         goto out;
11557                         }
11558                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11559                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11560                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11561                         goto out;
11562                 }
11563
11564                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11565                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11566                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11567                                              mapping);
11568                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11569                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11570                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11571                                              mapping);
11572                 } else
11573                         goto out;
11574
11575                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11576                                             PCI_DMA_FROMDEVICE);
11577
11578                 for (i = data_off; i < rx_len; i++, val++) {
11579                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11580                                 goto out;
11581                 }
11582         }
11583
11584         err = 0;
11585
11586         /* tg3_free_rings will unmap and free the rx_skb */
11587 out:
11588         return err;
11589 }
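/*
 * Editor's note: tg3_run_loopback() is a single-shot round trip --
 * build a frame whose payload is the byte sequence (i & 0xff), post one
 * TX descriptor, kick the coalescing engine and poll (up to ~350 usec)
 * until the TX consumer and RX producer indices both advance, then walk
 * the RX return ring and byte-compare the received payload against the
 * transmitted pattern.  Any mismatch, bad ring choice, or length
 * inconsistency fails the test with -EIO.
 */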
11590
11591 #define TG3_STD_LOOPBACK_FAILED         1
11592 #define TG3_JMB_LOOPBACK_FAILED         2
11593 #define TG3_TSO_LOOPBACK_FAILED         4
11594 #define TG3_LOOPBACK_FAILED \
11595         (TG3_STD_LOOPBACK_FAILED | \
11596          TG3_JMB_LOOPBACK_FAILED | \
11597          TG3_TSO_LOOPBACK_FAILED)
11598
11599 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11600 {
11601         int err = -EIO;
11602         u32 eee_cap;
11603
11604         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11605         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11606
11607         if (!netif_running(tp->dev)) {
11608                 data[0] = TG3_LOOPBACK_FAILED;
11609                 data[1] = TG3_LOOPBACK_FAILED;
11610                 if (do_extlpbk)
11611                         data[2] = TG3_LOOPBACK_FAILED;
11612                 goto done;
11613         }
11614
11615         err = tg3_reset_hw(tp, 1);
11616         if (err) {
11617                 data[0] = TG3_LOOPBACK_FAILED;
11618                 data[1] = TG3_LOOPBACK_FAILED;
11619                 if (do_extlpbk)
11620                         data[2] = TG3_LOOPBACK_FAILED;
11621                 goto done;
11622         }
11623
11624         if (tg3_flag(tp, ENABLE_RSS)) {
11625                 int i;
11626
11627                 /* Reroute all rx packets to the 1st queue */
11628                 for (i = MAC_RSS_INDIR_TBL_0;
11629                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11630                         tw32(i, 0x0);
11631         }
11632
11633         /* HW erratum: MAC loopback fails in some cases on the 5780.
11634          * Normal traffic and PHY loopback are not affected by this
11635          * erratum.  Also, the MAC loopback test is deprecated for
11636          * all newer ASIC revisions.
11637          */
11638         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11639             !tg3_flag(tp, CPMU_PRESENT)) {
11640                 tg3_mac_loopback(tp, true);
11641
11642                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11643                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11644
11645                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11646                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11647                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11648
11649                 tg3_mac_loopback(tp, false);
11650         }
11651
11652         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11653             !tg3_flag(tp, USE_PHYLIB)) {
11654                 int i;
11655
11656                 tg3_phy_lpbk_set(tp, 0, false);
11657
11658                 /* Wait for link */
11659                 for (i = 0; i < 100; i++) {
11660                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11661                                 break;
11662                         mdelay(1);
11663                 }
11664
11665                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11666                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11667                 if (tg3_flag(tp, TSO_CAPABLE) &&
11668                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11669                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11670                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11671                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11672                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11673
11674                 if (do_extlpbk) {
11675                         tg3_phy_lpbk_set(tp, 0, true);
11676
11677                         /* All link indications report up, but the hardware
11678                          * isn't really ready for about 20 msec.  Double it
11679                          * to be sure.
11680                          */
11681                         mdelay(40);
11682
11683                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11684                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11685                         if (tg3_flag(tp, TSO_CAPABLE) &&
11686                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11687                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11688                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11689                             tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11690                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11691                 }
11692
11693                 /* Re-enable gphy autopowerdown. */
11694                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11695                         tg3_phy_toggle_apd(tp, true);
11696         }
11697
11698         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11699
11700 done:
11701         tp->phy_flags |= eee_cap;
11702
11703         return err;
11704 }
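/*
 * Editor's note: on return, data[0] holds MAC-loopback failures,
 * data[1] internal-PHY-loopback failures, and data[2] (external
 * loopback runs only) external-loopback failures, each as a bitwise OR
 * of the TG3_STD/JMB/TSO_LOOPBACK_FAILED bits defined above.
 */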
11705
11706 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11707                           u64 *data)
11708 {
11709         struct tg3 *tp = netdev_priv(dev);
11710         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11711
11712         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11713             tg3_power_up(tp)) {
11714                 etest->flags |= ETH_TEST_FL_FAILED;
11715                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11716                 return;
11717         }
11718
11719         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11720
11721         if (tg3_test_nvram(tp) != 0) {
11722                 etest->flags |= ETH_TEST_FL_FAILED;
11723                 data[0] = 1;
11724         }
11725         if (!doextlpbk && tg3_test_link(tp)) {
11726                 etest->flags |= ETH_TEST_FL_FAILED;
11727                 data[1] = 1;
11728         }
11729         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11730                 int err, err2 = 0, irq_sync = 0;
11731
11732                 if (netif_running(dev)) {
11733                         tg3_phy_stop(tp);
11734                         tg3_netif_stop(tp);
11735                         irq_sync = 1;
11736                 }
11737
11738                 tg3_full_lock(tp, irq_sync);
11739
11740                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11741                 err = tg3_nvram_lock(tp);
11742                 tg3_halt_cpu(tp, RX_CPU_BASE);
11743                 if (!tg3_flag(tp, 5705_PLUS))
11744                         tg3_halt_cpu(tp, TX_CPU_BASE);
11745                 if (!err)
11746                         tg3_nvram_unlock(tp);
11747
11748                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11749                         tg3_phy_reset(tp);
11750
11751                 if (tg3_test_registers(tp) != 0) {
11752                         etest->flags |= ETH_TEST_FL_FAILED;
11753                         data[2] = 1;
11754                 }
11755
11756                 if (tg3_test_memory(tp) != 0) {
11757                         etest->flags |= ETH_TEST_FL_FAILED;
11758                         data[3] = 1;
11759                 }
11760
11761                 if (doextlpbk)
11762                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11763
11764                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11765                         etest->flags |= ETH_TEST_FL_FAILED;
11766
11767                 tg3_full_unlock(tp);
11768
11769                 if (tg3_test_interrupt(tp) != 0) {
11770                         etest->flags |= ETH_TEST_FL_FAILED;
11771                         data[7] = 1;
11772                 }
11773
11774                 tg3_full_lock(tp, 0);
11775
11776                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11777                 if (netif_running(dev)) {
11778                         tg3_flag_set(tp, INIT_COMPLETE);
11779                         err2 = tg3_restart_hw(tp, 1);
11780                         if (!err2)
11781                                 tg3_netif_start(tp);
11782                 }
11783
11784                 tg3_full_unlock(tp);
11785
11786                 if (irq_sync && !err2)
11787                         tg3_phy_start(tp);
11788         }
11789         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11790                 tg3_power_down(tp);
11791
11792 }
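/*
 * Editor's note: tg3_self_test() is reached from userspace through the
 * ETHTOOL_TEST ioctl (what "ethtool -t ethX offline" issues).  A hedged
 * userspace sketch, assuming the interface name "eth0" and TG3_NUM_TEST
 * (8 in this driver version) result slots; fd is any socket, e.g.
 * socket(AF_INET, SOCK_DGRAM, 0).  Illustration only, not driver code:
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int run_offline_selftest(int fd)
{
        char buf[sizeof(struct ethtool_test) + 8 * sizeof(__u64)];
        struct ethtool_test *test = (struct ethtool_test *)buf;
        struct ifreq ifr;
        int i;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        memset(buf, 0, sizeof(buf));
        test->cmd = ETHTOOL_TEST;
        test->flags = ETH_TEST_FL_OFFLINE;      /* kernel fills test->len */
        ifr.ifr_data = (char *)test;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return -1;
        for (i = 0; i < 8; i++)                 /* nonzero slot == failed */
                printf("test %d: %llu\n", i,
                       (unsigned long long)test->data[i]);
        return (test->flags & ETH_TEST_FL_FAILED) ? 1 : 0;
}
#endif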
11793
11794 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11795 {
11796         struct mii_ioctl_data *data = if_mii(ifr);
11797         struct tg3 *tp = netdev_priv(dev);
11798         int err;
11799
11800         if (tg3_flag(tp, USE_PHYLIB)) {
11801                 struct phy_device *phydev;
11802                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11803                         return -EAGAIN;
11804                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11805                 return phy_mii_ioctl(phydev, ifr, cmd);
11806         }
11807
11808         switch (cmd) {
11809         case SIOCGMIIPHY:
11810                 data->phy_id = tp->phy_addr;
11811
11812                 /* fallthru */
11813         case SIOCGMIIREG: {
11814                 u32 mii_regval;
11815
11816                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11817                         break;                  /* We have no PHY */
11818
11819                 if (!netif_running(dev))
11820                         return -EAGAIN;
11821
11822                 spin_lock_bh(&tp->lock);
11823                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11824                 spin_unlock_bh(&tp->lock);
11825
11826                 data->val_out = mii_regval;
11827
11828                 return err;
11829         }
11830
11831         case SIOCSMIIREG:
11832                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11833                         break;                  /* We have no PHY */
11834
11835                 if (!netif_running(dev))
11836                         return -EAGAIN;
11837
11838                 spin_lock_bh(&tp->lock);
11839                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11840                 spin_unlock_bh(&tp->lock);
11841
11842                 return err;
11843
11844         default:
11845                 /* do nothing */
11846                 break;
11847         }
11848         return -EOPNOTSUPP;
11849 }
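/*
 * Editor's note: the SIOCGMIIPHY/SIOCGMIIREG paths above serve the
 * legacy MII ioctls.  A hedged userspace sketch reading the BMSR
 * (MII register 1) -- the cast mirrors the kernel's if_mii() helper;
 * assumes interface "eth0".  Illustration only, not driver code:
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_bmsr(int fd, unsigned int *bmsr)
{
        struct ifreq ifr;
        struct mii_ioctl_data *mii =
                (struct mii_ioctl_data *)&ifr.ifr_ifru;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)   /* fills mii->phy_id */
                return -1;
        mii->reg_num = MII_BMSR;
        if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
                return -1;
        *bmsr = mii->val_out;
        return 0;
}
#endif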
11850
11851 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11852 {
11853         struct tg3 *tp = netdev_priv(dev);
11854
11855         memcpy(ec, &tp->coal, sizeof(*ec));
11856         return 0;
11857 }
11858
11859 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11860 {
11861         struct tg3 *tp = netdev_priv(dev);
11862         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11863         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11864
11865         if (!tg3_flag(tp, 5705_PLUS)) {
11866                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11867                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11868                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11869                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11870         }
11871
11872         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11873             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11874             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11875             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11876             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11877             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11878             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11879             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11880             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11881             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11882                 return -EINVAL;
11883
11884         /* No rx interrupts will be generated if both are zero */
11885         if ((ec->rx_coalesce_usecs == 0) &&
11886             (ec->rx_max_coalesced_frames == 0))
11887                 return -EINVAL;
11888
11889         /* No tx interrupts will be generated if both are zero */
11890         if ((ec->tx_coalesce_usecs == 0) &&
11891             (ec->tx_max_coalesced_frames == 0))
11892                 return -EINVAL;
11893
11894         /* Only copy relevant parameters, ignore all others. */
11895         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11896         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11897         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11898         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11899         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11900         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11901         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11902         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11903         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11904
11905         if (netif_running(dev)) {
11906                 tg3_full_lock(tp, 0);
11907                 __tg3_set_coalesce(tp, &tp->coal);
11908                 tg3_full_unlock(tp);
11909         }
11910         return 0;
11911 }
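/*
 * Editor's note: these two hooks back "ethtool -c/-C ethX".  The upper
 * bounds checked above come from the hardware's coalescing register
 * widths (MAX_RXCOL_TICKS and friends), and the two both-zero checks
 * reject settings under which the chip would never raise an rx or tx
 * interrupt at all.
 */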
11912
11913 static const struct ethtool_ops tg3_ethtool_ops = {
11914         .get_settings           = tg3_get_settings,
11915         .set_settings           = tg3_set_settings,
11916         .get_drvinfo            = tg3_get_drvinfo,
11917         .get_regs_len           = tg3_get_regs_len,
11918         .get_regs               = tg3_get_regs,
11919         .get_wol                = tg3_get_wol,
11920         .set_wol                = tg3_set_wol,
11921         .get_msglevel           = tg3_get_msglevel,
11922         .set_msglevel           = tg3_set_msglevel,
11923         .nway_reset             = tg3_nway_reset,
11924         .get_link               = ethtool_op_get_link,
11925         .get_eeprom_len         = tg3_get_eeprom_len,
11926         .get_eeprom             = tg3_get_eeprom,
11927         .set_eeprom             = tg3_set_eeprom,
11928         .get_ringparam          = tg3_get_ringparam,
11929         .set_ringparam          = tg3_set_ringparam,
11930         .get_pauseparam         = tg3_get_pauseparam,
11931         .set_pauseparam         = tg3_set_pauseparam,
11932         .self_test              = tg3_self_test,
11933         .get_strings            = tg3_get_strings,
11934         .set_phys_id            = tg3_set_phys_id,
11935         .get_ethtool_stats      = tg3_get_ethtool_stats,
11936         .get_coalesce           = tg3_get_coalesce,
11937         .set_coalesce           = tg3_set_coalesce,
11938         .get_sset_count         = tg3_get_sset_count,
11939 };
11940
11941 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11942 {
11943         u32 cursize, val, magic;
11944
11945         tp->nvram_size = EEPROM_CHIP_SIZE;
11946
11947         if (tg3_nvram_read(tp, 0, &magic) != 0)
11948                 return;
11949
11950         if ((magic != TG3_EEPROM_MAGIC) &&
11951             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11952             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11953                 return;
11954
11955         /*
11956          * Size the chip by reading offsets at increasing powers of two.
11957          * When we encounter our validation signature, we know the addressing
11958          * has wrapped around, and thus have our chip size.
11959          */
11960         cursize = 0x10;
11961
11962         while (cursize < tp->nvram_size) {
11963                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11964                         return;
11965
11966                 if (val == magic)
11967                         break;
11968
11969                 cursize <<= 1;
11970         }
11971
11972         tp->nvram_size = cursize;
11973 }
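/*
 * Editor's note: the sizing loop relies on address wraparound -- reads
 * below the part's real size return unrelated data, but once 'cursize'
 * reaches the real size the address aliases back to offset 0 and the
 * read returns the magic value again, so the loop breaks with cursize
 * equal to the chip size.
 */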
11974
11975 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11976 {
11977         u32 val;
11978
11979         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11980                 return;
11981
11982         /* Selfboot format */
11983         if (val != TG3_EEPROM_MAGIC) {
11984                 tg3_get_eeprom_size(tp);
11985                 return;
11986         }
11987
11988         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11989                 if (val != 0) {
11990                         /* We want to operate on the 16-bit value at
11991                          * offset 0xf2.  tg3_nvram_read() byteswaps the
11992                          * data it returns the same way as every other
11993                          * register access, which guarantees the value
11994                          * we want always lands in the lower 16 bits
11995                          * of 'val'.  However, NVRAM itself is stored
11996                          * in LE format, so the data returned is always
11997                          * opposite the endianness of the CPU.  The
11998                          * 16-bit byteswap below brings the value back
11999                          * to CPU endianness.
12000                          */
12001                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12002                         return;
12003                 }
12004         }
12005         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12006 }
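/*
 * Editor's note: a worked example of the double swap above
 * (illustrative) -- a part whose size field at offset 0xf2 is 512 KB
 * stores the 16-bit value 0x0200 in LE byte order.  The register-style
 * byteswap in tg3_nvram_read() leaves the low 16 bits of 'val' holding
 * the bytes opposite to CPU order, i.e. 0x0002, and swab16(0x0002)
 * recovers 0x0200; 0x0200 * 1024 = 512 KB = TG3_NVRAM_SIZE_512KB.
 */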
12007
12008 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12009 {
12010         u32 nvcfg1;
12011
12012         nvcfg1 = tr32(NVRAM_CFG1);
12013         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12014                 tg3_flag_set(tp, FLASH);
12015         } else {
12016                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12017                 tw32(NVRAM_CFG1, nvcfg1);
12018         }
12019
12020         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12021             tg3_flag(tp, 5780_CLASS)) {
12022                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12023                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12024                         tp->nvram_jedecnum = JEDEC_ATMEL;
12025                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12026                         tg3_flag_set(tp, NVRAM_BUFFERED);
12027                         break;
12028                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12029                         tp->nvram_jedecnum = JEDEC_ATMEL;
12030                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12031                         break;
12032                 case FLASH_VENDOR_ATMEL_EEPROM:
12033                         tp->nvram_jedecnum = JEDEC_ATMEL;
12034                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12035                         tg3_flag_set(tp, NVRAM_BUFFERED);
12036                         break;
12037                 case FLASH_VENDOR_ST:
12038                         tp->nvram_jedecnum = JEDEC_ST;
12039                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12040                         tg3_flag_set(tp, NVRAM_BUFFERED);
12041                         break;
12042                 case FLASH_VENDOR_SAIFUN:
12043                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12044                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12045                         break;
12046                 case FLASH_VENDOR_SST_SMALL:
12047                 case FLASH_VENDOR_SST_LARGE:
12048                         tp->nvram_jedecnum = JEDEC_SST;
12049                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12050                         break;
12051                 }
12052         } else {
12053                 tp->nvram_jedecnum = JEDEC_ATMEL;
12054                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12055                 tg3_flag_set(tp, NVRAM_BUFFERED);
12056         }
12057 }
12058
12059 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12060 {
12061         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12062         case FLASH_5752PAGE_SIZE_256:
12063                 tp->nvram_pagesize = 256;
12064                 break;
12065         case FLASH_5752PAGE_SIZE_512:
12066                 tp->nvram_pagesize = 512;
12067                 break;
12068         case FLASH_5752PAGE_SIZE_1K:
12069                 tp->nvram_pagesize = 1024;
12070                 break;
12071         case FLASH_5752PAGE_SIZE_2K:
12072                 tp->nvram_pagesize = 2048;
12073                 break;
12074         case FLASH_5752PAGE_SIZE_4K:
12075                 tp->nvram_pagesize = 4096;
12076                 break;
12077         case FLASH_5752PAGE_SIZE_264:
12078                 tp->nvram_pagesize = 264;
12079                 break;
12080         case FLASH_5752PAGE_SIZE_528:
12081                 tp->nvram_pagesize = 528;
12082                 break;
12083         }
12084 }
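/*
 * Editor's note: 264 and 528 are Atmel DataFlash "power-of-two plus
 * eight" page sizes.  Those parts are addressed by page number plus
 * byte offset rather than by a flat offset, so callers leave address
 * translation enabled for them and set NO_NVRAM_ADDR_TRANS for every
 * other (power-of-two) page size.
 */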
12085
12086 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12087 {
12088         u32 nvcfg1;
12089
12090         nvcfg1 = tr32(NVRAM_CFG1);
12091
12092         /* NVRAM protection for TPM */
12093         if (nvcfg1 & (1 << 27))
12094                 tg3_flag_set(tp, PROTECTED_NVRAM);
12095
12096         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12097         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12098         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12099                 tp->nvram_jedecnum = JEDEC_ATMEL;
12100                 tg3_flag_set(tp, NVRAM_BUFFERED);
12101                 break;
12102         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12103                 tp->nvram_jedecnum = JEDEC_ATMEL;
12104                 tg3_flag_set(tp, NVRAM_BUFFERED);
12105                 tg3_flag_set(tp, FLASH);
12106                 break;
12107         case FLASH_5752VENDOR_ST_M45PE10:
12108         case FLASH_5752VENDOR_ST_M45PE20:
12109         case FLASH_5752VENDOR_ST_M45PE40:
12110                 tp->nvram_jedecnum = JEDEC_ST;
12111                 tg3_flag_set(tp, NVRAM_BUFFERED);
12112                 tg3_flag_set(tp, FLASH);
12113                 break;
12114         }
12115
12116         if (tg3_flag(tp, FLASH)) {
12117                 tg3_nvram_get_pagesize(tp, nvcfg1);
12118         } else {
12119                 /* For EEPROMs, set the pagesize to the maximum EEPROM size */
12120                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12121
12122                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12123                 tw32(NVRAM_CFG1, nvcfg1);
12124         }
12125 }
12126
12127 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12128 {
12129         u32 nvcfg1, protect = 0;
12130
12131         nvcfg1 = tr32(NVRAM_CFG1);
12132
12133         /* NVRAM protection for TPM */
12134         if (nvcfg1 & (1 << 27)) {
12135                 tg3_flag_set(tp, PROTECTED_NVRAM);
12136                 protect = 1;
12137         }
12138
12139         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12140         switch (nvcfg1) {
12141         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12142         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12143         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12144         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12145                 tp->nvram_jedecnum = JEDEC_ATMEL;
12146                 tg3_flag_set(tp, NVRAM_BUFFERED);
12147                 tg3_flag_set(tp, FLASH);
12148                 tp->nvram_pagesize = 264;
12149                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12150                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12151                         tp->nvram_size = (protect ? 0x3e200 :
12152                                           TG3_NVRAM_SIZE_512KB);
12153                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12154                         tp->nvram_size = (protect ? 0x1f200 :
12155                                           TG3_NVRAM_SIZE_256KB);
12156                 else
12157                         tp->nvram_size = (protect ? 0x1f200 :
12158                                           TG3_NVRAM_SIZE_128KB);
12159                 break;
12160         case FLASH_5752VENDOR_ST_M45PE10:
12161         case FLASH_5752VENDOR_ST_M45PE20:
12162         case FLASH_5752VENDOR_ST_M45PE40:
12163                 tp->nvram_jedecnum = JEDEC_ST;
12164                 tg3_flag_set(tp, NVRAM_BUFFERED);
12165                 tg3_flag_set(tp, FLASH);
12166                 tp->nvram_pagesize = 256;
12167                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12168                         tp->nvram_size = (protect ?
12169                                           TG3_NVRAM_SIZE_64KB :
12170                                           TG3_NVRAM_SIZE_128KB);
12171                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12172                         tp->nvram_size = (protect ?
12173                                           TG3_NVRAM_SIZE_64KB :
12174                                           TG3_NVRAM_SIZE_256KB);
12175                 else
12176                         tp->nvram_size = (protect ?
12177                                           TG3_NVRAM_SIZE_128KB :
12178                                           TG3_NVRAM_SIZE_512KB);
12179                 break;
12180         }
12181 }
12182
12183 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12184 {
12185         u32 nvcfg1;
12186
12187         nvcfg1 = tr32(NVRAM_CFG1);
12188
12189         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12190         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12191         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12192         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12193         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12194                 tp->nvram_jedecnum = JEDEC_ATMEL;
12195                 tg3_flag_set(tp, NVRAM_BUFFERED);
12196                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12197
12198                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12199                 tw32(NVRAM_CFG1, nvcfg1);
12200                 break;
12201         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12202         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12203         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12204         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12205                 tp->nvram_jedecnum = JEDEC_ATMEL;
12206                 tg3_flag_set(tp, NVRAM_BUFFERED);
12207                 tg3_flag_set(tp, FLASH);
12208                 tp->nvram_pagesize = 264;
12209                 break;
12210         case FLASH_5752VENDOR_ST_M45PE10:
12211         case FLASH_5752VENDOR_ST_M45PE20:
12212         case FLASH_5752VENDOR_ST_M45PE40:
12213                 tp->nvram_jedecnum = JEDEC_ST;
12214                 tg3_flag_set(tp, NVRAM_BUFFERED);
12215                 tg3_flag_set(tp, FLASH);
12216                 tp->nvram_pagesize = 256;
12217                 break;
12218         }
12219 }
12220
12221 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12222 {
12223         u32 nvcfg1, protect = 0;
12224
12225         nvcfg1 = tr32(NVRAM_CFG1);
12226
12227         /* NVRAM protection for TPM */
12228         if (nvcfg1 & (1 << 27)) {
12229                 tg3_flag_set(tp, PROTECTED_NVRAM);
12230                 protect = 1;
12231         }
12232
12233         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12234         switch (nvcfg1) {
12235         case FLASH_5761VENDOR_ATMEL_ADB021D:
12236         case FLASH_5761VENDOR_ATMEL_ADB041D:
12237         case FLASH_5761VENDOR_ATMEL_ADB081D:
12238         case FLASH_5761VENDOR_ATMEL_ADB161D:
12239         case FLASH_5761VENDOR_ATMEL_MDB021D:
12240         case FLASH_5761VENDOR_ATMEL_MDB041D:
12241         case FLASH_5761VENDOR_ATMEL_MDB081D:
12242         case FLASH_5761VENDOR_ATMEL_MDB161D:
12243                 tp->nvram_jedecnum = JEDEC_ATMEL;
12244                 tg3_flag_set(tp, NVRAM_BUFFERED);
12245                 tg3_flag_set(tp, FLASH);
12246                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12247                 tp->nvram_pagesize = 256;
12248                 break;
12249         case FLASH_5761VENDOR_ST_A_M45PE20:
12250         case FLASH_5761VENDOR_ST_A_M45PE40:
12251         case FLASH_5761VENDOR_ST_A_M45PE80:
12252         case FLASH_5761VENDOR_ST_A_M45PE16:
12253         case FLASH_5761VENDOR_ST_M_M45PE20:
12254         case FLASH_5761VENDOR_ST_M_M45PE40:
12255         case FLASH_5761VENDOR_ST_M_M45PE80:
12256         case FLASH_5761VENDOR_ST_M_M45PE16:
12257                 tp->nvram_jedecnum = JEDEC_ST;
12258                 tg3_flag_set(tp, NVRAM_BUFFERED);
12259                 tg3_flag_set(tp, FLASH);
12260                 tp->nvram_pagesize = 256;
12261                 break;
12262         }
12263
12264         if (protect) {
12265                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12266         } else {
12267                 switch (nvcfg1) {
12268                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12269                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12270                 case FLASH_5761VENDOR_ST_A_M45PE16:
12271                 case FLASH_5761VENDOR_ST_M_M45PE16:
12272                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12273                         break;
12274                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12275                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12276                 case FLASH_5761VENDOR_ST_A_M45PE80:
12277                 case FLASH_5761VENDOR_ST_M_M45PE80:
12278                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12279                         break;
12280                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12281                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12282                 case FLASH_5761VENDOR_ST_A_M45PE40:
12283                 case FLASH_5761VENDOR_ST_M_M45PE40:
12284                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12285                         break;
12286                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12287                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12288                 case FLASH_5761VENDOR_ST_A_M45PE20:
12289                 case FLASH_5761VENDOR_ST_M_M45PE20:
12290                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12291                         break;
12292                 }
12293         }
12294 }
12295
12296 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12297 {
12298         tp->nvram_jedecnum = JEDEC_ATMEL;
12299         tg3_flag_set(tp, NVRAM_BUFFERED);
12300         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12301 }
12302
12303 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12304 {
12305         u32 nvcfg1;
12306
12307         nvcfg1 = tr32(NVRAM_CFG1);
12308
12309         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12310         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12311         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12312                 tp->nvram_jedecnum = JEDEC_ATMEL;
12313                 tg3_flag_set(tp, NVRAM_BUFFERED);
12314                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12315
12316                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12317                 tw32(NVRAM_CFG1, nvcfg1);
12318                 return;
12319         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12320         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12321         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12322         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12323         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12324         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12325         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12326                 tp->nvram_jedecnum = JEDEC_ATMEL;
12327                 tg3_flag_set(tp, NVRAM_BUFFERED);
12328                 tg3_flag_set(tp, FLASH);
12329
12330                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12331                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12332                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12333                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12334                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12335                         break;
12336                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12337                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12338                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12339                         break;
12340                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12341                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12342                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12343                         break;
12344                 }
12345                 break;
12346         case FLASH_5752VENDOR_ST_M45PE10:
12347         case FLASH_5752VENDOR_ST_M45PE20:
12348         case FLASH_5752VENDOR_ST_M45PE40:
12349                 tp->nvram_jedecnum = JEDEC_ST;
12350                 tg3_flag_set(tp, NVRAM_BUFFERED);
12351                 tg3_flag_set(tp, FLASH);
12352
12353                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12354                 case FLASH_5752VENDOR_ST_M45PE10:
12355                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12356                         break;
12357                 case FLASH_5752VENDOR_ST_M45PE20:
12358                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12359                         break;
12360                 case FLASH_5752VENDOR_ST_M45PE40:
12361                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12362                         break;
12363                 }
12364                 break;
12365         default:
12366                 tg3_flag_set(tp, NO_NVRAM);
12367                 return;
12368         }
12369
12370         tg3_nvram_get_pagesize(tp, nvcfg1);
12371         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12372                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12373 }
12374
12375
12376 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12377 {
12378         u32 nvcfg1;
12379
12380         nvcfg1 = tr32(NVRAM_CFG1);
12381
12382         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12383         case FLASH_5717VENDOR_ATMEL_EEPROM:
12384         case FLASH_5717VENDOR_MICRO_EEPROM:
12385                 tp->nvram_jedecnum = JEDEC_ATMEL;
12386                 tg3_flag_set(tp, NVRAM_BUFFERED);
12387                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12388
12389                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12390                 tw32(NVRAM_CFG1, nvcfg1);
12391                 return;
12392         case FLASH_5717VENDOR_ATMEL_MDB011D:
12393         case FLASH_5717VENDOR_ATMEL_ADB011B:
12394         case FLASH_5717VENDOR_ATMEL_ADB011D:
12395         case FLASH_5717VENDOR_ATMEL_MDB021D:
12396         case FLASH_5717VENDOR_ATMEL_ADB021B:
12397         case FLASH_5717VENDOR_ATMEL_ADB021D:
12398         case FLASH_5717VENDOR_ATMEL_45USPT:
12399                 tp->nvram_jedecnum = JEDEC_ATMEL;
12400                 tg3_flag_set(tp, NVRAM_BUFFERED);
12401                 tg3_flag_set(tp, FLASH);
12402
12403                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12404                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12405                         /* Detect size with tg3_nvram_get_size() */
12406                         break;
12407                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12408                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12409                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12410                         break;
12411                 default:
12412                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12413                         break;
12414                 }
12415                 break;
12416         case FLASH_5717VENDOR_ST_M_M25PE10:
12417         case FLASH_5717VENDOR_ST_A_M25PE10:
12418         case FLASH_5717VENDOR_ST_M_M45PE10:
12419         case FLASH_5717VENDOR_ST_A_M45PE10:
12420         case FLASH_5717VENDOR_ST_M_M25PE20:
12421         case FLASH_5717VENDOR_ST_A_M25PE20:
12422         case FLASH_5717VENDOR_ST_M_M45PE20:
12423         case FLASH_5717VENDOR_ST_A_M45PE20:
12424         case FLASH_5717VENDOR_ST_25USPT:
12425         case FLASH_5717VENDOR_ST_45USPT:
12426                 tp->nvram_jedecnum = JEDEC_ST;
12427                 tg3_flag_set(tp, NVRAM_BUFFERED);
12428                 tg3_flag_set(tp, FLASH);
12429
12430                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12431                 case FLASH_5717VENDOR_ST_M_M25PE20:
12432                 case FLASH_5717VENDOR_ST_M_M45PE20:
12433                         /* Detect size with tg3_nvram_get_size() */
12434                         break;
12435                 case FLASH_5717VENDOR_ST_A_M25PE20:
12436                 case FLASH_5717VENDOR_ST_A_M45PE20:
12437                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12438                         break;
12439                 default:
12440                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12441                         break;
12442                 }
12443                 break;
12444         default:
12445                 tg3_flag_set(tp, NO_NVRAM);
12446                 return;
12447         }
12448
12449         tg3_nvram_get_pagesize(tp, nvcfg1);
12450         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12451                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12452 }
12453
12454 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12455 {
12456         u32 nvcfg1, nvmpinstrp;
12457
12458         nvcfg1 = tr32(NVRAM_CFG1);
12459         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12460
12461         switch (nvmpinstrp) {
12462         case FLASH_5720_EEPROM_HD:
12463         case FLASH_5720_EEPROM_LD:
12464                 tp->nvram_jedecnum = JEDEC_ATMEL;
12465                 tg3_flag_set(tp, NVRAM_BUFFERED);
12466
12467                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12468                 tw32(NVRAM_CFG1, nvcfg1);
12469                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12470                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12471                 else
12472                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12473                 return;
12474         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12475         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12476         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12477         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12478         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12479         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12480         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12481         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12482         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12483         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12484         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12485         case FLASH_5720VENDOR_ATMEL_45USPT:
12486                 tp->nvram_jedecnum = JEDEC_ATMEL;
12487                 tg3_flag_set(tp, NVRAM_BUFFERED);
12488                 tg3_flag_set(tp, FLASH);
12489
12490                 switch (nvmpinstrp) {
12491                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12492                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12493                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12494                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12495                         break;
12496                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12497                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12498                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12499                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12500                         break;
12501                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12502                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12503                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12504                         break;
12505                 default:
12506                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12507                         break;
12508                 }
12509                 break;
12510         case FLASH_5720VENDOR_M_ST_M25PE10:
12511         case FLASH_5720VENDOR_M_ST_M45PE10:
12512         case FLASH_5720VENDOR_A_ST_M25PE10:
12513         case FLASH_5720VENDOR_A_ST_M45PE10:
12514         case FLASH_5720VENDOR_M_ST_M25PE20:
12515         case FLASH_5720VENDOR_M_ST_M45PE20:
12516         case FLASH_5720VENDOR_A_ST_M25PE20:
12517         case FLASH_5720VENDOR_A_ST_M45PE20:
12518         case FLASH_5720VENDOR_M_ST_M25PE40:
12519         case FLASH_5720VENDOR_M_ST_M45PE40:
12520         case FLASH_5720VENDOR_A_ST_M25PE40:
12521         case FLASH_5720VENDOR_A_ST_M45PE40:
12522         case FLASH_5720VENDOR_M_ST_M25PE80:
12523         case FLASH_5720VENDOR_M_ST_M45PE80:
12524         case FLASH_5720VENDOR_A_ST_M25PE80:
12525         case FLASH_5720VENDOR_A_ST_M45PE80:
12526         case FLASH_5720VENDOR_ST_25USPT:
12527         case FLASH_5720VENDOR_ST_45USPT:
12528                 tp->nvram_jedecnum = JEDEC_ST;
12529                 tg3_flag_set(tp, NVRAM_BUFFERED);
12530                 tg3_flag_set(tp, FLASH);
12531
12532                 switch (nvmpinstrp) {
12533                 case FLASH_5720VENDOR_M_ST_M25PE20:
12534                 case FLASH_5720VENDOR_M_ST_M45PE20:
12535                 case FLASH_5720VENDOR_A_ST_M25PE20:
12536                 case FLASH_5720VENDOR_A_ST_M45PE20:
12537                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12538                         break;
12539                 case FLASH_5720VENDOR_M_ST_M25PE40:
12540                 case FLASH_5720VENDOR_M_ST_M45PE40:
12541                 case FLASH_5720VENDOR_A_ST_M25PE40:
12542                 case FLASH_5720VENDOR_A_ST_M45PE40:
12543                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12544                         break;
12545                 case FLASH_5720VENDOR_M_ST_M25PE80:
12546                 case FLASH_5720VENDOR_M_ST_M45PE80:
12547                 case FLASH_5720VENDOR_A_ST_M25PE80:
12548                 case FLASH_5720VENDOR_A_ST_M45PE80:
12549                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12550                         break;
12551                 default:
12552                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12553                         break;
12554                 }
12555                 break;
12556         default:
12557                 tg3_flag_set(tp, NO_NVRAM);
12558                 return;
12559         }
12560
12561         tg3_nvram_get_pagesize(tp, nvcfg1);
12562         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12563                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12564 }
12565
12566 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12567 static void __devinit tg3_nvram_init(struct tg3 *tp)
12568 {
12569         tw32_f(GRC_EEPROM_ADDR,
12570              (EEPROM_ADDR_FSM_RESET |
12571               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12572                EEPROM_ADDR_CLKPERD_SHIFT)));
12573
12574         msleep(1);
12575
12576         /* Enable SEEPROM accesses. */
12577         tw32_f(GRC_LOCAL_CTRL,
12578              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12579         udelay(100);
12580
12581         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12582             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12583                 tg3_flag_set(tp, NVRAM);
12584
12585                 if (tg3_nvram_lock(tp)) {
12586                         netdev_warn(tp->dev,
12587                                     "Cannot get nvram lock, %s failed\n",
12588                                     __func__);
12589                         return;
12590                 }
12591                 tg3_enable_nvram_access(tp);
12592
12593                 tp->nvram_size = 0;
12594
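                /* Each ASIC family encodes its flash strapping
                 * differently, hence the per-family decode helpers
                 * below.  A helper leaves tp->nvram_size at zero when
                 * the strap does not imply a size, in which case the
                 * size is probed via tg3_get_nvram_size().
                 */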
12595                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12596                         tg3_get_5752_nvram_info(tp);
12597                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12598                         tg3_get_5755_nvram_info(tp);
12599                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12600                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12601                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12602                         tg3_get_5787_nvram_info(tp);
12603                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12604                         tg3_get_5761_nvram_info(tp);
12605                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12606                         tg3_get_5906_nvram_info(tp);
12607                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12608                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12609                         tg3_get_57780_nvram_info(tp);
12610                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12611                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12612                         tg3_get_5717_nvram_info(tp);
12613                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12614                         tg3_get_5720_nvram_info(tp);
12615                 else
12616                         tg3_get_nvram_info(tp);
12617
12618                 if (tp->nvram_size == 0)
12619                         tg3_get_nvram_size(tp);
12620
12621                 tg3_disable_nvram_access(tp);
12622                 tg3_nvram_unlock(tp);
12623
12624         } else {
12625                 tg3_flag_clear(tp, NVRAM);
12626                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12627
12628                 tg3_get_eeprom_size(tp);
12629         }
12630 }
12631
12632 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12633                                     u32 offset, u32 len, u8 *buf)
12634 {
12635         int i, j, rc = 0;
12636         u32 val;
12637
12638         for (i = 0; i < len; i += 4) {
12639                 u32 addr;
12640                 __be32 data;
12641
12642                 addr = offset + i;
12643
12644                 memcpy(&data, buf + i, 4);
12645
12646                 /*
12647                  * The SEEPROM interface expects the data to always be in the
12648                  * opposite of the native endian format.  We accomplish this
12649                  * by reversing all the operations that would have been
12650                  * performed on the data by a call to tg3_nvram_read_be32().
12651                  */
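                /* e.g. buffer bytes 12 34 56 78 hold the big-endian
                 * value 0x12345678 and reach GRC_EEPROM_DATA as
                 * 0x78563412.
                 */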
12652                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12653
12654                 val = tr32(GRC_EEPROM_ADDR);
12655                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12656
12657                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12658                         EEPROM_ADDR_READ);
12659                 tw32(GRC_EEPROM_ADDR, val |
12660                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12661                         (addr & EEPROM_ADDR_ADDR_MASK) |
12662                         EEPROM_ADDR_START |
12663                         EEPROM_ADDR_WRITE);
12664
12665                 for (j = 0; j < 1000; j++) {
12666                         val = tr32(GRC_EEPROM_ADDR);
12667
12668                         if (val & EEPROM_ADDR_COMPLETE)
12669                                 break;
12670                         msleep(1);
12671                 }
12672                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12673                         rc = -EBUSY;
12674                         break;
12675                 }
12676         }
12677
12678         return rc;
12679 }
12680
12681 /* offset and length are dword aligned */
12682 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12683                 u8 *buf)
12684 {
12685         int ret = 0;
12686         u32 pagesize = tp->nvram_pagesize;
12687         u32 pagemask = pagesize - 1;
12688         u32 nvram_cmd;
12689         u8 *tmp;
12690
12691         tmp = kmalloc(pagesize, GFP_KERNEL);
12692         if (tmp == NULL)
12693                 return -ENOMEM;
12694
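        /* Unbuffered flash is erased a page at a time, so each pass of
         * this loop is a read-modify-write cycle: read the whole page
         * into tmp, merge in the caller's data, erase the page, then
         * stream the page back out one dword at a time.
         */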
12695         while (len) {
12696                 int j;
12697                 u32 phy_addr, page_off, size;
12698
12699                 phy_addr = offset & ~pagemask;
12700
12701                 for (j = 0; j < pagesize; j += 4) {
12702                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12703                                                   (__be32 *) (tmp + j));
12704                         if (ret)
12705                                 break;
12706                 }
12707                 if (ret)
12708                         break;
12709
12710                 page_off = offset & pagemask;
12711                 size = pagesize;
12712                 if (len < size)
12713                         size = len;
12714
12715                 len -= size;
12716
12717                 memcpy(tmp + page_off, buf, size);
12718
12719                 offset = offset + (pagesize - page_off);
12720
12721                 tg3_enable_nvram_access(tp);
12722
12723                 /*
12724                  * Before we can erase the flash page, we need
12725                  * to issue a special "write enable" command.
12726                  */
12727                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12728
12729                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12730                         break;
12731
12732                 /* Erase the target page */
12733                 tw32(NVRAM_ADDR, phy_addr);
12734
12735                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12736                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12737
12738                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12739                         break;
12740
12741                 /* Issue another write enable to start the write. */
12742                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12743
12744                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12745                         break;
12746
12747                 for (j = 0; j < pagesize; j += 4) {
12748                         __be32 data;
12749
12750                         data = *((__be32 *) (tmp + j));
12751
12752                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12753
12754                         tw32(NVRAM_ADDR, phy_addr + j);
12755
12756                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12757                                 NVRAM_CMD_WR;
12758
12759                         if (j == 0)
12760                                 nvram_cmd |= NVRAM_CMD_FIRST;
12761                         else if (j == (pagesize - 4))
12762                                 nvram_cmd |= NVRAM_CMD_LAST;
12763
12764                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12765                                 break;
12766                 }
12767                 if (ret)
12768                         break;
12769         }
12770
12771         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12772         tg3_nvram_exec_cmd(tp, nvram_cmd);
12773
12774         kfree(tmp);
12775
12776         return ret;
12777 }
12778
12779 /* offset and length are dword aligned */
12780 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12781                 u8 *buf)
12782 {
12783         int i, ret = 0;
12784
12785         for (i = 0; i < len; i += 4, offset += 4) {
12786                 u32 page_off, phy_addr, nvram_cmd;
12787                 __be32 data;
12788
12789                 memcpy(&data, buf + i, 4);
12790                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12791
12792                 page_off = offset % tp->nvram_pagesize;
12793
12794                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12795
12796                 tw32(NVRAM_ADDR, phy_addr);
12797
12798                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12799
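                /* FIRST and LAST bracket each flash page as well as the
                 * transfer as a whole; e.g. with a 264-byte page, the
                 * dwords at page offsets 0 and 260 carry FIRST and LAST
                 * respectively.
                 */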
12800                 if (page_off == 0 || i == 0)
12801                         nvram_cmd |= NVRAM_CMD_FIRST;
12802                 if (page_off == (tp->nvram_pagesize - 4))
12803                         nvram_cmd |= NVRAM_CMD_LAST;
12804
12805                 if (i == (len - 4))
12806                         nvram_cmd |= NVRAM_CMD_LAST;
12807
12808                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12809                     !tg3_flag(tp, 5755_PLUS) &&
12810                     (tp->nvram_jedecnum == JEDEC_ST) &&
12811                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12812
12813                         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_WREN |
12814                                                  NVRAM_CMD_GO |
12815                                                  NVRAM_CMD_DONE);
12816                         if (ret)
12817                                 break;
12818                 }
12819                 if (!tg3_flag(tp, FLASH)) {
12820                         /* We always do complete word writes to eeprom. */
12821                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12822                 }
12823
12824                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12825                         break;
12826         }
12827         return ret;
12828 }
12829
12830 /* offset and length are dword aligned */
12831 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12832 {
12833         int ret;
12834
12835         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12836                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12837                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12838                 udelay(40);
12839         }
12840
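        /* Three write paths: the legacy serial-EEPROM interface when no
         * NVRAM controller is present, the buffered path for EEPROMs and
         * self-buffering flash, and the unbuffered (page-erase) path for
         * raw flash parts.
         */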
12841         if (!tg3_flag(tp, NVRAM)) {
12842                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12843         } else {
12844                 u32 grc_mode;
12845
12846                 ret = tg3_nvram_lock(tp);
12847                 if (ret)
12848                         return ret;
12849
12850                 tg3_enable_nvram_access(tp);
12851                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12852                         tw32(NVRAM_WRITE1, 0x406);
12853
12854                 grc_mode = tr32(GRC_MODE);
12855                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12856
12857                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12858                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12859                                 buf);
12860                 } else {
12861                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12862                                 buf);
12863                 }
12864
12865                 grc_mode = tr32(GRC_MODE);
12866                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12867
12868                 tg3_disable_nvram_access(tp);
12869                 tg3_nvram_unlock(tp);
12870         }
12871
12872         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12873                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12874                 udelay(40);
12875         }
12876
12877         return ret;
12878 }
12879
12880 struct subsys_tbl_ent {
12881         u16 subsys_vendor, subsys_devid;
12882         u32 phy_id;
12883 };
12884
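/* Boards that predate a usable NVRAM signature are identified by PCI
 * subsystem IDs instead.  A phy_id of 0 here means the PHY type is not
 * known from the table alone; tg3_phy_probe() treats it as serdes.
 */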
12885 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12886         /* Broadcom boards. */
12887         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12888           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12889         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12890           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12891         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12892           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12893         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12894           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12895         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12896           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12897         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12898           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12899         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12900           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12901         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12902           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12903         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12904           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12905         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12906           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12907         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12908           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12909
12910         /* 3com boards. */
12911         { TG3PCI_SUBVENDOR_ID_3COM,
12912           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12913         { TG3PCI_SUBVENDOR_ID_3COM,
12914           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12915         { TG3PCI_SUBVENDOR_ID_3COM,
12916           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12917         { TG3PCI_SUBVENDOR_ID_3COM,
12918           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12919         { TG3PCI_SUBVENDOR_ID_3COM,
12920           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12921
12922         /* DELL boards. */
12923         { TG3PCI_SUBVENDOR_ID_DELL,
12924           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12925         { TG3PCI_SUBVENDOR_ID_DELL,
12926           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12927         { TG3PCI_SUBVENDOR_ID_DELL,
12928           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12929         { TG3PCI_SUBVENDOR_ID_DELL,
12930           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12931
12932         /* Compaq boards. */
12933         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12934           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12935         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12936           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12937         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12938           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12939         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12940           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12941         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12942           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12943
12944         /* IBM boards. */
12945         { TG3PCI_SUBVENDOR_ID_IBM,
12946           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12947 };
12948
12949 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12950 {
12951         int i;
12952
12953         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12954                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12955                      tp->pdev->subsystem_vendor) &&
12956                     (subsys_id_to_phy_id[i].subsys_devid ==
12957                      tp->pdev->subsystem_device))
12958                         return &subsys_id_to_phy_id[i];
12959         }
12960         return NULL;
12961 }
12962
12963 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12964 {
12965         u32 val;
12966
12967         tp->phy_id = TG3_PHY_ID_INVALID;
12968         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12969
12970         /* Assume an onboard, WOL-capable device by default. */
12971         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12972         tg3_flag_set(tp, WOL_CAP);
12973
12974         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12975                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12976                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12977                         tg3_flag_set(tp, IS_NIC);
12978                 }
12979                 val = tr32(VCPU_CFGSHDW);
12980                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12981                         tg3_flag_set(tp, ASPM_WORKAROUND);
12982                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12983                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12984                         tg3_flag_set(tp, WOL_ENABLE);
12985                         device_set_wakeup_enable(&tp->pdev->dev, true);
12986                 }
12987                 goto done;
12988         }
12989
12990         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12991         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12992                 u32 nic_cfg, led_cfg;
12993                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12994                 int eeprom_phy_serdes = 0;
12995
12996                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12997                 tp->nic_sram_data_cfg = nic_cfg;
12998
12999                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13000                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13001                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13002                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13003                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13004                     (ver > 0) && (ver < 0x100))
13005                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13006
13007                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13008                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13009
13010                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13011                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13012                         eeprom_phy_serdes = 1;
13013
13014                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13015                 if (nic_phy_id != 0) {
13016                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13017                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13018
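                        /* Pack the two SRAM words into the driver's
                         * internal PHY ID layout, the same layout
                         * tg3_phy_probe() builds from MII_PHYSID1/2.
                         */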
13019                         eeprom_phy_id  = (id1 >> 16) << 10;
13020                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13021                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13022                 } else
13023                         eeprom_phy_id = 0;
13024
13025                 tp->phy_id = eeprom_phy_id;
13026                 if (eeprom_phy_serdes) {
13027                         if (!tg3_flag(tp, 5705_PLUS))
13028                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13029                         else
13030                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13031                 }
13032
13033                 if (tg3_flag(tp, 5750_PLUS))
13034                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13035                                     SHASTA_EXT_LED_MODE_MASK);
13036                 else
13037                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13038
13039                 switch (led_cfg) {
13040                 default:
13041                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13042                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13043                         break;
13044
13045                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13046                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13047                         break;
13048
13049                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13050                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13051
13052                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13053                          * read from some older 5700/5701 bootcode.
13054                          */
13055                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13056                             ASIC_REV_5700 ||
13057                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13058                             ASIC_REV_5701)
13059                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13060
13061                         break;
13062
13063                 case SHASTA_EXT_LED_SHARED:
13064                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13065                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13066                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13067                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13068                                                  LED_CTRL_MODE_PHY_2);
13069                         break;
13070
13071                 case SHASTA_EXT_LED_MAC:
13072                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13073                         break;
13074
13075                 case SHASTA_EXT_LED_COMBO:
13076                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13077                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13078                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13079                                                  LED_CTRL_MODE_PHY_2);
13080                         break;
13081
13082                 }
13083
13084                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13085                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13086                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13087                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13088
13089                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13090                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13091
13092                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13093                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13094                         if ((tp->pdev->subsystem_vendor ==
13095                              PCI_VENDOR_ID_ARIMA) &&
13096                             (tp->pdev->subsystem_device == 0x205a ||
13097                              tp->pdev->subsystem_device == 0x2063))
13098                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13099                 } else {
13100                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13101                         tg3_flag_set(tp, IS_NIC);
13102                 }
13103
13104                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13105                         tg3_flag_set(tp, ENABLE_ASF);
13106                         if (tg3_flag(tp, 5750_PLUS))
13107                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13108                 }
13109
13110                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13111                     tg3_flag(tp, 5750_PLUS))
13112                         tg3_flag_set(tp, ENABLE_APE);
13113
13114                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13115                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13116                         tg3_flag_clear(tp, WOL_CAP);
13117
13118                 if (tg3_flag(tp, WOL_CAP) &&
13119                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13120                         tg3_flag_set(tp, WOL_ENABLE);
13121                         device_set_wakeup_enable(&tp->pdev->dev, true);
13122                 }
13123
13124                 if (cfg2 & (1 << 17))
13125                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13126
13127                 /* SerDes signal pre-emphasis in register 0x590 is set by
13128                  * the bootcode if bit 18 is set. */
13129                 if (cfg2 & (1 << 18))
13130                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13131
13132                 if ((tg3_flag(tp, 57765_PLUS) ||
13133                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13134                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13135                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13136                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13137
13138                 if (tg3_flag(tp, PCI_EXPRESS) &&
13139                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13140                     !tg3_flag(tp, 57765_PLUS)) {
13141                         u32 cfg3;
13142
13143                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13144                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13145                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13146                 }
13147
13148                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13149                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13150                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13151                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13152                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13153                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13154         }
13155 done:
13156         if (tg3_flag(tp, WOL_CAP))
13157                 device_set_wakeup_enable(&tp->pdev->dev,
13158                                          tg3_flag(tp, WOL_ENABLE));
13159         else
13160                 device_set_wakeup_capable(&tp->pdev->dev, false);
13161 }
13162
13163 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13164 {
13165         int i;
13166         u32 val;
13167
13168         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13169         tw32(OTP_CTRL, cmd);
13170
13171         /* Wait up to 1 ms for the command to execute. */
13172         for (i = 0; i < 100; i++) {
13173                 val = tr32(OTP_STATUS);
13174                 if (val & OTP_STATUS_CMD_DONE)
13175                         break;
13176                 udelay(10);
13177         }
13178
13179         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13180 }
13181
13182 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13183  * configuration is a 32-bit value that straddles the alignment boundary.
13184  * We do two 32-bit reads and then shift and merge the results.
13185  */
13186 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13187 {
13188         u32 bhalf_otp, thalf_otp;
13189
13190         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13191
13192         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13193                 return 0;
13194
13195         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13196
13197         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13198                 return 0;
13199
13200         thalf_otp = tr32(OTP_READ_DATA);
13201
13202         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13203
13204         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13205                 return 0;
13206
13207         bhalf_otp = tr32(OTP_READ_DATA);
13208
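        /* e.g. thalf_otp = 0xAAAA1111 and bhalf_otp = 0x2222BBBB would
         * merge into a phycfg value of 0x11112222.
         */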
13209         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13210 }
13211
13212 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13213 {
13214         u32 adv = ADVERTISED_Autoneg |
13215                   ADVERTISED_Pause;
13216
13217         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13218                 adv |= ADVERTISED_1000baseT_Half |
13219                        ADVERTISED_1000baseT_Full;
13220
13221         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13222                 adv |= ADVERTISED_100baseT_Half |
13223                        ADVERTISED_100baseT_Full |
13224                        ADVERTISED_10baseT_Half |
13225                        ADVERTISED_10baseT_Full |
13226                        ADVERTISED_TP;
13227         else
13228                 adv |= ADVERTISED_FIBRE;
13229
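        /* Everything below starts as an "invalid" placeholder with
         * autoneg enabled; the active_* fields are updated once a link
         * is actually negotiated.
         */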
13230         tp->link_config.advertising = adv;
13231         tp->link_config.speed = SPEED_INVALID;
13232         tp->link_config.duplex = DUPLEX_INVALID;
13233         tp->link_config.autoneg = AUTONEG_ENABLE;
13234         tp->link_config.active_speed = SPEED_INVALID;
13235         tp->link_config.active_duplex = DUPLEX_INVALID;
13236         tp->link_config.orig_speed = SPEED_INVALID;
13237         tp->link_config.orig_duplex = DUPLEX_INVALID;
13238         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13239 }
13240
13241 static int __devinit tg3_phy_probe(struct tg3 *tp)
13242 {
13243         u32 hw_phy_id_1, hw_phy_id_2;
13244         u32 hw_phy_id, hw_phy_id_masked;
13245         int err;
13246
13247         /* flow control autonegotiation is default behavior */
13248         tg3_flag_set(tp, PAUSE_AUTONEG);
13249         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13250
13251         if (tg3_flag(tp, USE_PHYLIB))
13252                 return tg3_phy_init(tp);
13253
13254         /* Reading the PHY ID register can conflict with ASF
13255          * firmware access to the PHY hardware.
13256          */
13257         err = 0;
13258         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13259                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13260         } else {
13261                 /* Now read the physical PHY_ID from the chip and verify
13262                  * that it is sane.  If it doesn't look good, we fall back
13263                  * to the PHY ID found in the eeprom area and, failing
13264                  * that, to the hard-coded subsystem-ID table.
13265                  */
13266                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13267                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13268
13269                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13270                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13271                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13272
13273                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13274         }
13275
13276         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13277                 tp->phy_id = hw_phy_id;
13278                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13279                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13280                 else
13281                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13282         } else {
13283                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13284                         /* Do nothing, phy ID already set up in
13285                          * tg3_get_eeprom_hw_cfg().
13286                          */
13287                 } else {
13288                         struct subsys_tbl_ent *p;
13289
13290                         /* No eeprom signature?  Try the hardcoded
13291                          * subsys device table.
13292                          */
13293                         p = tg3_lookup_by_subsys(tp);
13294                         if (!p)
13295                                 return -ENODEV;
13296
13297                         tp->phy_id = p->phy_id;
13298                         if (!tp->phy_id ||
13299                             tp->phy_id == TG3_PHY_ID_BCM8002)
13300                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13301                 }
13302         }
13303
13304         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13305             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13306              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13307              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13308               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13309              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13310               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13311                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13312
13313         tg3_phy_init_link_config(tp);
13314
13315         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13316             !tg3_flag(tp, ENABLE_APE) &&
13317             !tg3_flag(tp, ENABLE_ASF)) {
13318                 u32 bmsr, mask;
13319
13320                 tg3_readphy(tp, MII_BMSR, &bmsr);
13321                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13322                     (bmsr & BMSR_LSTATUS))
13323                         goto skip_phy_reset;
13324
13325                 err = tg3_phy_reset(tp);
13326                 if (err)
13327                         return err;
13328
13329                 tg3_phy_set_wirespeed(tp);
13330
13331                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13332                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13333                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13334                 if (!tg3_copper_is_advertising_all(tp, mask)) {
13335                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13336                                             tp->link_config.flowctrl);
13337
13338                         tg3_writephy(tp, MII_BMCR,
13339                                      BMCR_ANENABLE | BMCR_ANRESTART);
13340                 }
13341         }
13342
13343 skip_phy_reset:
13344         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13345                 err = tg3_init_5401phy_dsp(tp);
13346                 if (err)
13347                         return err;
13348
13349                 err = tg3_init_5401phy_dsp(tp);
13350         }
13351
13352         return err;
13353 }
13354
13355 static void __devinit tg3_read_vpd(struct tg3 *tp)
13356 {
13357         u8 *vpd_data;
13358         unsigned int block_end, rosize, len;
13359         u32 vpdlen;
13360         int j, i = 0;
13361
13362         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13363         if (!vpd_data)
13364                 goto out_no_vpd;
13365
13366         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13367         if (i < 0)
13368                 goto out_not_found;
13369
13370         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13371         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13372         i += PCI_VPD_LRDT_TAG_SIZE;
13373
13374         if (block_end > vpdlen)
13375                 goto out_not_found;
13376
13377         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13378                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13379         if (j > 0) {
13380                 len = pci_vpd_info_field_size(&vpd_data[j]);
13381
13382                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13383                 if (j + len > block_end || len != 4 ||
13384                     memcmp(&vpd_data[j], "1028", 4))
13385                         goto partno;
13386
13387                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13388                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13389                 if (j < 0)
13390                         goto partno;
13391
13392                 len = pci_vpd_info_field_size(&vpd_data[j]);
13393
13394                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13395                 if (j + len > block_end)
13396                         goto partno;
13397
13398                 memcpy(tp->fw_ver, &vpd_data[j], len);
13399                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13400         }
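        /* An MFR_ID of "1028" (Dell's PCI vendor ID) marks Dell-branded
         * boards whose VENDOR0 keyword carries the firmware version; the
         * bootcode version is appended after the " bc " separator by the
         * NVRAM version readers called from tg3_read_fw_ver().
         */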
13401
13402 partno:
13403         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13404                                       PCI_VPD_RO_KEYWORD_PARTNO);
13405         if (i < 0)
13406                 goto out_not_found;
13407
13408         len = pci_vpd_info_field_size(&vpd_data[i]);
13409
13410         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13411         if (len > TG3_BPN_SIZE ||
13412             (len + i) > vpdlen)
13413                 goto out_not_found;
13414
13415         memcpy(tp->board_part_number, &vpd_data[i], len);
13416
13417 out_not_found:
13418         kfree(vpd_data);
13419         if (tp->board_part_number[0])
13420                 return;
13421
13422 out_no_vpd:
13423         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13424                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13425                         strcpy(tp->board_part_number, "BCM5717");
13426                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13427                         strcpy(tp->board_part_number, "BCM5718");
13428                 else
13429                         goto nomatch;
13430         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13431                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13432                         strcpy(tp->board_part_number, "BCM57780");
13433                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13434                         strcpy(tp->board_part_number, "BCM57760");
13435                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13436                         strcpy(tp->board_part_number, "BCM57790");
13437                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13438                         strcpy(tp->board_part_number, "BCM57788");
13439                 else
13440                         goto nomatch;
13441         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13442                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13443                         strcpy(tp->board_part_number, "BCM57761");
13444                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13445                         strcpy(tp->board_part_number, "BCM57765");
13446                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13447                         strcpy(tp->board_part_number, "BCM57781");
13448                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13449                         strcpy(tp->board_part_number, "BCM57785");
13450                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13451                         strcpy(tp->board_part_number, "BCM57791");
13452                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13453                         strcpy(tp->board_part_number, "BCM57795");
13454                 else
13455                         goto nomatch;
13456         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13457                 strcpy(tp->board_part_number, "BCM95906");
13458         } else {
13459 nomatch:
13460                 strcpy(tp->board_part_number, "none");
13461         }
13462 }
13463
13464 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13465 {
13466         u32 val;
13467
13468         if (tg3_nvram_read(tp, offset, &val) ||
13469             (val & 0xfc000000) != 0x0c000000 ||
13470             tg3_nvram_read(tp, offset + 4, &val) ||
13471             val != 0)
13472                 return 0;
13473
13474         return 1;
13475 }
13476
13477 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13478 {
13479         u32 val, offset, start, ver_offset;
13480         int i, dst_off;
13481         bool newver = false;
13482
13483         if (tg3_nvram_read(tp, 0xc, &offset) ||
13484             tg3_nvram_read(tp, 0x4, &start))
13485                 return;
13486
13487         offset = tg3_nvram_logical_addr(tp, offset);
13488
13489         if (tg3_nvram_read(tp, offset, &val))
13490                 return;
13491
13492         if ((val & 0xfc000000) == 0x0c000000) {
13493                 if (tg3_nvram_read(tp, offset + 4, &val))
13494                         return;
13495
13496                 if (val == 0)
13497                         newver = true;
13498         }
13499
13500         dst_off = strlen(tp->fw_ver);
13501
13502         if (newver) {
13503                 if (TG3_VER_SIZE - dst_off < 16 ||
13504                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13505                         return;
13506
13507                 offset = offset + ver_offset - start;
13508                 for (i = 0; i < 16; i += 4) {
13509                         __be32 v;
13510                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13511                                 return;
13512
13513                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13514                 }
13515         } else {
13516                 u32 major, minor;
13517
13518                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13519                         return;
13520
13521                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13522                         TG3_NVM_BCVER_MAJSFT;
13523                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13524                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13525                          "v%d.%02d", major, minor);
13526         }
13527 }
13528
13529 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13530 {
13531         u32 val, major, minor;
13532
13533         /* Use native endian representation */
13534         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13535                 return;
13536
13537         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13538                 TG3_NVM_HWSB_CFG1_MAJSFT;
13539         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13540                 TG3_NVM_HWSB_CFG1_MINSFT;
13541
13542         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13543 }
13544
13545 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13546 {
13547         u32 offset, major, minor, build;
13548
13549         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13550
13551         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13552                 return;
13553
13554         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13555         case TG3_EEPROM_SB_REVISION_0:
13556                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13557                 break;
13558         case TG3_EEPROM_SB_REVISION_2:
13559                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13560                 break;
13561         case TG3_EEPROM_SB_REVISION_3:
13562                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13563                 break;
13564         case TG3_EEPROM_SB_REVISION_4:
13565                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13566                 break;
13567         case TG3_EEPROM_SB_REVISION_5:
13568                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13569                 break;
13570         case TG3_EEPROM_SB_REVISION_6:
13571                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13572                 break;
13573         default:
13574                 return;
13575         }
13576
13577         if (tg3_nvram_read(tp, offset, &val))
13578                 return;
13579
13580         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13581                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13582         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13583                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13584         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13585
13586         if (minor > 99 || build > 26)
13587                 return;
13588
13589         offset = strlen(tp->fw_ver);
13590         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13591                  " v%d.%02d", major, minor);
13592
13593         if (build > 0) {
13594                 offset = strlen(tp->fw_ver);
13595                 if (offset < TG3_VER_SIZE - 1)
13596                         tp->fw_ver[offset] = 'a' + build - 1;
13597         }
13598 }
13599
13600 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13601 {
13602         u32 val, offset, start;
13603         int i, vlen;
13604
13605         for (offset = TG3_NVM_DIR_START;
13606              offset < TG3_NVM_DIR_END;
13607              offset += TG3_NVM_DIRENT_SIZE) {
13608                 if (tg3_nvram_read(tp, offset, &val))
13609                         return;
13610
13611                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13612                         break;
13613         }
13614
13615         if (offset == TG3_NVM_DIR_END)
13616                 return;
13617
13618         if (!tg3_flag(tp, 5705_PLUS))
13619                 start = 0x08000000;
13620         else if (tg3_nvram_read(tp, offset - 4, &start))
13621                 return;
13622
13623         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13624             !tg3_fw_img_is_valid(tp, offset) ||
13625             tg3_nvram_read(tp, offset + 8, &val))
13626                 return;
13627
13628         offset += val - start;
13629
13630         vlen = strlen(tp->fw_ver);
13631
13632         tp->fw_ver[vlen++] = ',';
13633         tp->fw_ver[vlen++] = ' ';
13634
13635         for (i = 0; i < 4; i++) {
13636                 __be32 v;
13637                 if (tg3_nvram_read_be32(tp, offset, &v))
13638                         return;
13639
13640                 offset += sizeof(v);
13641
13642                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13643                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13644                         break;
13645                 }
13646
13647                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13648                 vlen += sizeof(v);
13649         }
13650 }
13651
13652 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13653 {
13654         int vlen;
13655         u32 apedata;
13656         char *fwtype;
13657
13658         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13659                 return;
13660
13661         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13662         if (apedata != APE_SEG_SIG_MAGIC)
13663                 return;
13664
13665         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13666         if (!(apedata & APE_FW_STATUS_READY))
13667                 return;
13668
13669         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13670
13671         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13672                 tg3_flag_set(tp, APE_HAS_NCSI);
13673                 fwtype = "NCSI";
13674         } else {
13675                 fwtype = "DASH";
13676         }
13677
13678         vlen = strlen(tp->fw_ver);
13679
13680         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13681                  fwtype,
13682                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13683                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13684                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13685                  (apedata & APE_FW_VERSION_BLDMSK));
13686 }
13687
13688 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13689 {
13690         u32 val;
13691         bool vpd_vers = false;
13692
13693         if (tp->fw_ver[0] != 0)
13694                 vpd_vers = true;
13695
13696         if (tg3_flag(tp, NO_NVRAM)) {
13697                 strcat(tp->fw_ver, "sb");
13698                 return;
13699         }
13700
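        /* The first NVRAM word identifies the image format: full
         * bootcode (TG3_EEPROM_MAGIC), selfboot, or hardware selfboot,
         * each with its own version-string layout handled below.
         */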
13701         if (tg3_nvram_read(tp, 0, &val))
13702                 return;
13703
13704         if (val == TG3_EEPROM_MAGIC)
13705                 tg3_read_bc_ver(tp);
13706         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13707                 tg3_read_sb_ver(tp, val);
13708         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13709                 tg3_read_hwsb_ver(tp);
13710         else
13711                 return;
13712
13713         if (vpd_vers)
13714                 goto done;
13715
13716         if (tg3_flag(tp, ENABLE_APE)) {
13717                 if (tg3_flag(tp, ENABLE_ASF))
13718                         tg3_read_dash_ver(tp);
13719         } else if (tg3_flag(tp, ENABLE_ASF)) {
13720                 tg3_read_mgmtfw_ver(tp);
13721         }
13722
13723 done:
13724         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13725 }
13726
13727 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13728
13729 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13730 {
13731         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13732                 return TG3_RX_RET_MAX_SIZE_5717;
13733         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13734                 return TG3_RX_RET_MAX_SIZE_5700;
13735         else
13736                 return TG3_RX_RET_MAX_SIZE_5705;
13737 }
13738
13739 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13740         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13741         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13742         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13743         { },
13744 };
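/* Host bridges known to reorder posted PCI writes; tg3 enables its
 * mailbox write-reordering workaround when one of these is present in
 * the system.
 */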
13745
13746 static int __devinit tg3_get_invariants(struct tg3 *tp)
13747 {
13748         u32 misc_ctrl_reg;
13749         u32 pci_state_reg, grc_misc_cfg;
13750         u32 val;
13751         u16 pci_cmd;
13752         int err;
13753
13754         /* Force memory write invalidate off.  If we leave it on,
13755          * then on 5700_BX chips we have to enable a workaround.
13756          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13757          * to match the cacheline size.  The Broadcom driver has this
13758          * workaround but turns MWI off at all times, so it never uses
13759          * it.  This seems to suggest that the workaround is insufficient.
13760          */
13761         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13762         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13763         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13764
13765         /* Important! -- Make sure register accesses are byteswapped
13766          * correctly.  Also, for those chips that require it, make
13767          * sure that indirect register accesses are enabled before
13768          * the first operation.
13769          */
13770         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13771                               &misc_ctrl_reg);
13772         tp->misc_host_ctrl |= (misc_ctrl_reg &
13773                                MISC_HOST_CTRL_CHIPREV);
13774         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13775                                tp->misc_host_ctrl);
13776
13777         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13778                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13779         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13780                 u32 prod_id_asic_rev;
13781
13782                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13783                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13784                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13785                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13786                         pci_read_config_dword(tp->pdev,
13787                                               TG3PCI_GEN2_PRODID_ASICREV,
13788                                               &prod_id_asic_rev);
13789                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13790                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13791                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13792                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13793                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13794                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13795                         pci_read_config_dword(tp->pdev,
13796                                               TG3PCI_GEN15_PRODID_ASICREV,
13797                                               &prod_id_asic_rev);
13798                 else
13799                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13800                                               &prod_id_asic_rev);
13801
13802                 tp->pci_chip_rev_id = prod_id_asic_rev;
13803         }
13804
13805         /* Wrong chip ID in 5752 A0. This code can be removed later
13806          * as A0 is not in production.
13807          */
13808         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13809                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13810
13811         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13812          * we need to disable memory and use configuration cycles
13813          * only to access all registers. The 5702/03 chips
13814          * can mistakenly decode the special cycles from the
13815          * ICH chipsets as memory write cycles, causing corruption
13816          * of register and memory space. Only certain ICH bridges
13817          * will drive special cycles with non-zero data during the
13818          * address phase which can fall within the 5703's address
13819          * range. This is not an ICH bug as the PCI spec allows
13820          * non-zero address during special cycles. However, only
13821          * these ICH bridges are known to drive non-zero addresses
13822          * during special cycles.
13823          *
13824          * Since special cycles do not cross PCI bridges, we only
13825          * enable this workaround if the 5703 is on the secondary
13826          * bus of these ICH bridges.
13827          */
13828         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13829             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13830                 static struct tg3_dev_id {
13831                         u32     vendor;
13832                         u32     device;
13833                         u32     rev;
13834                 } ich_chipsets[] = {
13835                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13836                           PCI_ANY_ID },
13837                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13838                           PCI_ANY_ID },
13839                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13840                           0xa },
13841                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13842                           PCI_ANY_ID },
13843                         { },
13844                 };
13845                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13846                 struct pci_dev *bridge = NULL;
13847
13848                 while (pci_id->vendor != 0) {
13849                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13850                                                 bridge);
13851                         if (!bridge) {
13852                                 pci_id++;
13853                                 continue;
13854                         }
13855                         if (pci_id->rev != PCI_ANY_ID) {
13856                                 if (bridge->revision > pci_id->rev)
13857                                         continue;
13858                         }
13859                         if (bridge->subordinate &&
13860                             (bridge->subordinate->number ==
13861                              tp->pdev->bus->number)) {
13862                                 tg3_flag_set(tp, ICH_WORKAROUND);
13863                                 pci_dev_put(bridge);
13864                                 break;
13865                         }
13866                 }
13867         }
13868
13869         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13870                 static struct tg3_dev_id {
13871                         u32     vendor;
13872                         u32     device;
13873                 } bridge_chipsets[] = {
13874                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13875                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13876                         { },
13877                 };
13878                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13879                 struct pci_dev *bridge = NULL;
13880
13881                 while (pci_id->vendor != 0) {
13882                         bridge = pci_get_device(pci_id->vendor,
13883                                                 pci_id->device,
13884                                                 bridge);
13885                         if (!bridge) {
13886                                 pci_id++;
13887                                 continue;
13888                         }
13889                         if (bridge->subordinate &&
13890                             (bridge->subordinate->number <=
13891                              tp->pdev->bus->number) &&
13892                             (bridge->subordinate->subordinate >=
13893                              tp->pdev->bus->number)) {
13894                                 tg3_flag_set(tp, 5701_DMA_BUG);
13895                                 pci_dev_put(bridge);
13896                                 break;
13897                         }
13898                 }
13899         }
13900
13901         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13902          * DMA addresses above 40 bits.  This bridge may have additional
13903          * 57xx devices behind it, in some 4-port NIC designs for example.
13904          * Any tg3 device found behind the bridge also needs the 40-bit
13905          * DMA workaround.
13906          */
13907         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13908             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13909                 tg3_flag_set(tp, 5780_CLASS);
13910                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13911                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13912         } else {
13913                 struct pci_dev *bridge = NULL;
13914
13915                 do {
13916                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13917                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13918                                                 bridge);
13919                         if (bridge && bridge->subordinate &&
13920                             (bridge->subordinate->number <=
13921                              tp->pdev->bus->number) &&
13922                             (bridge->subordinate->subordinate >=
13923                              tp->pdev->bus->number)) {
13924                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13925                                 pci_dev_put(bridge);
13926                                 break;
13927                         }
13928                 } while (bridge);
13929         }
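	/* Editor's note: the probe path later consumes this flag by
	 * capping the DMA mask (cf. tg3_init_one(); sketch only):
	 *
	 *	if (tg3_flag(tp, 40BIT_DMA_BUG))
	 *		dma_mask = DMA_BIT_MASK(40);
	 *	else
	 *		dma_mask = DMA_BIT_MASK(64);
	 */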
13930
13931         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13932             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13933                 tp->pdev_peer = tg3_find_peer(tp);
13934
13935         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13936             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13937             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13938                 tg3_flag_set(tp, 5717_PLUS);
13939
13940         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13941             tg3_flag(tp, 5717_PLUS))
13942                 tg3_flag_set(tp, 57765_PLUS);
13943
13944         /* Intentionally exclude ASIC_REV_5906 */
13945         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13946             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13947             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13948             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13950             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13951             tg3_flag(tp, 57765_PLUS))
13952                 tg3_flag_set(tp, 5755_PLUS);
13953
13954         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13955             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13956             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13957             tg3_flag(tp, 5755_PLUS) ||
13958             tg3_flag(tp, 5780_CLASS))
13959                 tg3_flag_set(tp, 5750_PLUS);
13960
13961         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13962             tg3_flag(tp, 5750_PLUS))
13963                 tg3_flag_set(tp, 5705_PLUS);
13964
13965         /* Determine TSO capabilities */
13966         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13967                 ; /* Do nothing. HW bug. */
13968         else if (tg3_flag(tp, 57765_PLUS))
13969                 tg3_flag_set(tp, HW_TSO_3);
13970         else if (tg3_flag(tp, 5755_PLUS) ||
13971                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13972                 tg3_flag_set(tp, HW_TSO_2);
13973         else if (tg3_flag(tp, 5750_PLUS)) {
13974                 tg3_flag_set(tp, HW_TSO_1);
13975                 tg3_flag_set(tp, TSO_BUG);
13976                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13977                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13978                         tg3_flag_clear(tp, TSO_BUG);
13979         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13980                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13981                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13982                 tg3_flag_set(tp, TSO_BUG);
13983                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13984                         tp->fw_needed = FIRMWARE_TG3TSO5;
13985                 else
13986                         tp->fw_needed = FIRMWARE_TG3TSO;
13987         }
13988
13989         /* Selectively allow TSO based on operating conditions */
13990         if (tg3_flag(tp, HW_TSO_1) ||
13991             tg3_flag(tp, HW_TSO_2) ||
13992             tg3_flag(tp, HW_TSO_3) ||
13993             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13994                 tg3_flag_set(tp, TSO_CAPABLE);
13995         else {
13996                 tg3_flag_clear(tp, TSO_CAPABLE);
13997                 tg3_flag_clear(tp, TSO_BUG);
13998                 tp->fw_needed = NULL;
13999         }
14000
14001         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14002                 tp->fw_needed = FIRMWARE_TG3;
14003
14004         tp->irq_max = 1;
14005
14006         if (tg3_flag(tp, 5750_PLUS)) {
14007                 tg3_flag_set(tp, SUPPORT_MSI);
14008                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14009                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14010                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14011                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14012                      tp->pdev_peer == tp->pdev))
14013                         tg3_flag_clear(tp, SUPPORT_MSI);
14014
14015                 if (tg3_flag(tp, 5755_PLUS) ||
14016                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14017                         tg3_flag_set(tp, 1SHOT_MSI);
14018                 }
14019
14020                 if (tg3_flag(tp, 57765_PLUS)) {
14021                         tg3_flag_set(tp, SUPPORT_MSIX);
14022                         tp->irq_max = TG3_IRQ_MAX_VECS;
14023                 }
14024         }
14025
14026         if (tg3_flag(tp, 5755_PLUS))
14027                 tg3_flag_set(tp, SHORT_DMA_BUG);
14028
14029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14030                 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14031
14032         if (tg3_flag(tp, 5717_PLUS))
14033                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14034
14035         if (tg3_flag(tp, 57765_PLUS) &&
14036             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14037                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14038
14039         if (!tg3_flag(tp, 5705_PLUS) ||
14040             tg3_flag(tp, 5780_CLASS) ||
14041             tg3_flag(tp, USE_JUMBO_BDFLAG))
14042                 tg3_flag_set(tp, JUMBO_CAPABLE);
14043
14044         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14045                               &pci_state_reg);
14046
14047         if (pci_is_pcie(tp->pdev)) {
14048                 u16 lnkctl;
14049
14050                 tg3_flag_set(tp, PCI_EXPRESS);
14051
14052                 tp->pcie_readrq = 4096;
14053                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14054                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14055                         tp->pcie_readrq = 2048;
14056
14057                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14058
14059                 pci_read_config_word(tp->pdev,
14060                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14061                                      &lnkctl);
14062                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14063                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14064                             ASIC_REV_5906) {
14065                                 tg3_flag_clear(tp, HW_TSO_2);
14066                                 tg3_flag_clear(tp, TSO_CAPABLE);
14067                         }
14068                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14069                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14070                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14071                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14072                                 tg3_flag_set(tp, CLKREQ_BUG);
14073                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14074                         tg3_flag_set(tp, L1PLLPD_EN);
14075                 }
14076         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14077                 /* BCM5785 devices are effectively PCIe devices, and should
14078                  * follow PCIe codepaths, but do not have a PCIe capabilities
14079                  * section.
14080                  */
14081                 tg3_flag_set(tp, PCI_EXPRESS);
14082         } else if (!tg3_flag(tp, 5705_PLUS) ||
14083                    tg3_flag(tp, 5780_CLASS)) {
14084                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14085                 if (!tp->pcix_cap) {
14086                         dev_err(&tp->pdev->dev,
14087                                 "Cannot find PCI-X capability, aborting\n");
14088                         return -EIO;
14089                 }
14090
14091                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14092                         tg3_flag_set(tp, PCIX_MODE);
14093         }
14094
14095         /* If we have an AMD 762 or VIA K8T800 chipset, write
14096          * reordering of mailbox register writes by the host
14097          * controller can cause major trouble.  We read back from
14098          * every mailbox register write to force the writes to be
14099          * posted to the chip in order.
14100          */
14101         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14102             !tg3_flag(tp, PCI_EXPRESS))
14103                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14104
14105         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14106                              &tp->pci_cacheline_sz);
14107         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14108                              &tp->pci_lat_timer);
14109         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14110             tp->pci_lat_timer < 64) {
14111                 tp->pci_lat_timer = 64;
14112                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14113                                       tp->pci_lat_timer);
14114         }
14115
14116         /* Important! -- It is critical that the PCI-X hw workaround
14117          * situation is decided before the first MMIO register access.
14118          */
14119         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14120                 /* 5700 BX chips need to have their TX producer index
14121          * mailboxes written twice to work around a bug.
14122                  */
14123                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14124
14125                 /* If we are in PCI-X mode, enable register write workaround.
14126                  *
14127                  * The workaround is to use indirect register accesses
14128                  * for all chip writes not to mailbox registers.
14129                  */
14130                 if (tg3_flag(tp, PCIX_MODE)) {
14131                         u32 pm_reg;
14132
14133                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14134
14135                         /* The chip can have its power management PCI config
14136                          * space registers clobbered due to this bug.
14137                          * So explicitly force the chip into D0 here.
14138                          */
14139                         pci_read_config_dword(tp->pdev,
14140                                               tp->pm_cap + PCI_PM_CTRL,
14141                                               &pm_reg);
14142                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14143                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14144                         pci_write_config_dword(tp->pdev,
14145                                                tp->pm_cap + PCI_PM_CTRL,
14146                                                pm_reg);
14147
14148                         /* Also, force SERR#/PERR# in PCI command. */
14149                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14150                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14151                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14152                 }
14153         }
14154
14155         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14156                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14157         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14158                 tg3_flag_set(tp, PCI_32BIT);
14159
14160         /* Chip-specific fixup from Broadcom driver */
14161         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14162             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14163                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14164                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14165         }
14166
14167         /* Default fast path register access methods */
14168         tp->read32 = tg3_read32;
14169         tp->write32 = tg3_write32;
14170         tp->read32_mbox = tg3_read32;
14171         tp->write32_mbox = tg3_write32;
14172         tp->write32_tx_mbox = tg3_write32;
14173         tp->write32_rx_mbox = tg3_write32;
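	/* Editor's note: these pointers form a tiny vtable; the rest of
	 * the driver reaches the hardware through wrappers such as
	 * tw32()/tr32(), which expand (cf. tg3.h) roughly to:
	 *
	 *	#define tw32(reg, val)	tp->write32(tp, reg, val)
	 *	#define tr32(reg)	tp->read32(tp, reg)
	 */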
14174
14175         /* Various workaround register access methods */
14176         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14177                 tp->write32 = tg3_write_indirect_reg32;
14178         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14179                  (tg3_flag(tp, PCI_EXPRESS) &&
14180                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14181                 /*
14182                  * Back-to-back register writes can cause problems on these
14183                  * chips; the workaround is to read back all reg writes
14184                  * except those to mailbox regs.
14185                  *
14186                  * See tg3_write_indirect_reg32().
14187                  */
14188                 tp->write32 = tg3_write_flush_reg32;
14189         }
14190
14191         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14192                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14193                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14194                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14195         }
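	/* Editor's sketch of the tx mailbox method selected above, in
	 * the spirit of tg3_write32_tx_mbox() (illustrative):
	 *
	 *	writel(val, mbox);
	 *	if (tg3_flag(tp, TXD_MBOX_HWBUG))
	 *		writel(val, mbox);	// 5700 BX: write twice
	 *	if (tg3_flag(tp, MBOX_WRITE_REORDER))
	 *		readl(mbox);		// flush posted writes in order
	 */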
14196
14197         if (tg3_flag(tp, ICH_WORKAROUND)) {
14198                 tp->read32 = tg3_read_indirect_reg32;
14199                 tp->write32 = tg3_write_indirect_reg32;
14200                 tp->read32_mbox = tg3_read_indirect_mbox;
14201                 tp->write32_mbox = tg3_write_indirect_mbox;
14202                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14203                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14204
14205                 iounmap(tp->regs);
14206                 tp->regs = NULL;
14207
14208                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14209                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14210                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14211         }
14212         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14213                 tp->read32_mbox = tg3_read32_mbox_5906;
14214                 tp->write32_mbox = tg3_write32_mbox_5906;
14215                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14216                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14217         }
14218
14219         if (tp->write32 == tg3_write_indirect_reg32 ||
14220             (tg3_flag(tp, PCIX_MODE) &&
14221              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14222               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14223                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14224
14225         /* The memory arbiter has to be enabled in order for SRAM accesses
14226          * to succeed.  Normally on powerup the tg3 chip firmware will make
14227          * sure it is enabled, but other entities such as system netboot
14228          * code might disable it.
14229          */
14230         val = tr32(MEMARB_MODE);
14231         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14232
14233         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14234         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14235             tg3_flag(tp, 5780_CLASS)) {
14236                 if (tg3_flag(tp, PCIX_MODE)) {
14237                         pci_read_config_dword(tp->pdev,
14238                                               tp->pcix_cap + PCI_X_STATUS,
14239                                               &val);
14240                         tp->pci_fn = val & 0x7;
14241                 }
14242         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14243                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14244                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14245                     NIC_SRAM_CPMUSTAT_SIG) {
14246                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14247                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14248                 }
14249         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14250                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14251                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14252                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14253                     NIC_SRAM_CPMUSTAT_SIG) {
14254                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14255                                      TG3_CPMU_STATUS_FSHFT_5719;
14256                 }
14257         }
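	/* Editor's worked example (assuming the FMSK/FSHFT constants
	 * place the 5719/5720 function field in the top two bits of the
	 * CPMU status word): a value of 0xc0000000 decodes to pci_fn = 3,
	 * while on the 5717 any nonzero masked value simply selects
	 * function 1.
	 */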
14258
14259         /* Get eeprom hw config before calling tg3_set_power_state().
14260          * In particular, the TG3_FLAG_IS_NIC flag must be
14261          * determined before calling tg3_set_power_state() so that
14262          * we know whether or not to switch out of Vaux power.
14263          * When the flag is set, it means that GPIO1 is used for eeprom
14264          * write protect and also implies that it is a LOM where GPIOs
14265          * are not used to switch power.
14266          */
14267         tg3_get_eeprom_hw_cfg(tp);
14268
14269         if (tg3_flag(tp, ENABLE_APE)) {
14270                 /* Allow reads and writes to the
14271                  * APE register and memory space.
14272                  */
14273                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14274                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14275                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14276                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14277                                        pci_state_reg);
14278
14279                 tg3_ape_lock_init(tp);
14280         }
14281
14282         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14283             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14284             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14285             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14286             tg3_flag(tp, 57765_PLUS))
14287                 tg3_flag_set(tp, CPMU_PRESENT);
14288
14289         /* Set up tp->grc_local_ctrl before calling
14290          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14291          * will bring 5700's external PHY out of reset.
14292          * It is also used as eeprom write protect on LOMs.
14293          */
14294         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14295         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14296             tg3_flag(tp, EEPROM_WRITE_PROT))
14297                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14298                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14299         /* Unused GPIO3 must be driven as output on 5752 because there
14300          * are no pull-up resistors on unused GPIO pins.
14301          */
14302         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14303                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14304
14305         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14306             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14307             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14308                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14309
14310         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14311             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14312                 /* Turn off the debug UART. */
14313                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14314                 if (tg3_flag(tp, IS_NIC))
14315                         /* Keep VMain power. */
14316                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14317                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14318         }
14319
14320         /* Switch out of Vaux if it is a NIC */
14321         tg3_pwrsrc_switch_to_vmain(tp);
14322
14323         /* Derive initial jumbo mode from MTU assigned in
14324          * ether_setup() via the alloc_etherdev() call
14325          */
14326         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14327                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14328
14329         /* Determine WakeOnLan speed to use. */
14330         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14331             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14332             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14333             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14334                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14335         } else {
14336                 tg3_flag_set(tp, WOL_SPEED_100MB);
14337         }
14338
14339         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14340                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14341
14342         /* A few boards don't want the Ethernet@WireSpeed phy feature */
14343         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14344             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14345              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14346              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14347             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14348             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14349                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14350
14351         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14352             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14353                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14354         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14355                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14356
14357         if (tg3_flag(tp, 5705_PLUS) &&
14358             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14359             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14360             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14361             !tg3_flag(tp, 57765_PLUS)) {
14362                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14363                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14364                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14365                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14366                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14367                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14368                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14369                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14370                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14371                 } else
14372                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14373         }
14374
14375         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14376             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14377                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14378                 if (tp->phy_otp == 0)
14379                         tp->phy_otp = TG3_OTP_DEFAULT;
14380         }
14381
14382         if (tg3_flag(tp, CPMU_PRESENT))
14383                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14384         else
14385                 tp->mi_mode = MAC_MI_MODE_BASE;
14386
14387         tp->coalesce_mode = 0;
14388         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14389             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14390                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14391
14392         /* Set these bits to enable statistics workaround. */
14393         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14394             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14395             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14396                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14397                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14398         }
14399
14400         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14401             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14402                 tg3_flag_set(tp, USE_PHYLIB);
14403
14404         err = tg3_mdio_init(tp);
14405         if (err)
14406                 return err;
14407
14408         /* Initialize data/descriptor byte/word swapping. */
14409         val = tr32(GRC_MODE);
14410         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14411                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14412                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14413                         GRC_MODE_B2HRX_ENABLE |
14414                         GRC_MODE_HTX2B_ENABLE |
14415                         GRC_MODE_HOST_STACKUP);
14416         else
14417                 val &= GRC_MODE_HOST_STACKUP;
14418
14419         tw32(GRC_MODE, val | tp->grc_mode);
14420
14421         tg3_switch_clocks(tp);
14422
14423         /* Clear this out for sanity. */
14424         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14425
14426         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14427                               &pci_state_reg);
14428         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14429             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14430                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14431
14432                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14433                     chiprevid == CHIPREV_ID_5701_B0 ||
14434                     chiprevid == CHIPREV_ID_5701_B2 ||
14435                     chiprevid == CHIPREV_ID_5701_B5) {
14436                         void __iomem *sram_base;
14437
14438                         /* Write some dummy words into the SRAM status block
14439                          * area and see if they read back correctly.  If the
14440                          * returned value is bad, force-enable the PCIX workaround.
14441                          */
14442                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14443
14444                         writel(0x00000000, sram_base);
14445                         writel(0x00000000, sram_base + 4);
14446                         writel(0xffffffff, sram_base + 4);
14447                         if (readl(sram_base) != 0x00000000)
14448                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14449                 }
14450         }
14451
14452         udelay(50);
14453         tg3_nvram_init(tp);
14454
14455         grc_misc_cfg = tr32(GRC_MISC_CFG);
14456         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14457
14458         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14459             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14460              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14461                 tg3_flag_set(tp, IS_5788);
14462
14463         if (!tg3_flag(tp, IS_5788) &&
14464             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14465                 tg3_flag_set(tp, TAGGED_STATUS);
14466         if (tg3_flag(tp, TAGGED_STATUS)) {
14467                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14468                                       HOSTCC_MODE_CLRTICK_TXBD);
14469
14470                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14471                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14472                                        tp->misc_host_ctrl);
14473         }
14474
14475         /* Preserve the APE MAC_MODE bits */
14476         if (tg3_flag(tp, ENABLE_APE))
14477                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14478         else
14479                 tp->mac_mode = 0;
14480
14481         /* these are limited to 10/100 only */
14482         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14483              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14484             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14485              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14486              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14487               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14488               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14489             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14490              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14491               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14492               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14493             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14494             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14495             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14496             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14497                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14498
14499         err = tg3_phy_probe(tp);
14500         if (err) {
14501                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14502                 /* ... but do not return immediately ... */
14503                 tg3_mdio_fini(tp);
14504         }
14505
14506         tg3_read_vpd(tp);
14507         tg3_read_fw_ver(tp);
14508
14509         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14510                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14511         } else {
14512                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14513                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14514                 else
14515                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14516         }
14517
14518         /* 5700 {AX,BX} chips have a broken status block link
14519          * change bit implementation, so we must use the
14520          * status register in those cases.
14521          */
14522         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14523                 tg3_flag_set(tp, USE_LINKCHG_REG);
14524         else
14525                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14526
14527         /* The led_ctrl is set during tg3_phy_probe; here we might
14528          * have to force the link status polling mechanism based
14529          * upon subsystem IDs.
14530          */
14531         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14532             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14533             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14534                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14535                 tg3_flag_set(tp, USE_LINKCHG_REG);
14536         }
14537
14538         /* For all SERDES we poll the MAC status register. */
14539         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14540                 tg3_flag_set(tp, POLL_SERDES);
14541         else
14542                 tg3_flag_clear(tp, POLL_SERDES);
14543
14544         tp->rx_offset = NET_IP_ALIGN;
14545         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14546         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14547             tg3_flag(tp, PCIX_MODE)) {
14548                 tp->rx_offset = 0;
14549 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14550                 tp->rx_copy_thresh = ~(u16)0;
14551 #endif
14552         }
14553
14554         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14555         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14556         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14557
14558         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14559
14560         /* Increment the rx prod index on the rx std ring by at most
14561          * 8 for these chips to work around hw errata.
14562          */
14563         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14564             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14565             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14566                 tp->rx_std_max_post = 8;
14567
14568         if (tg3_flag(tp, ASPM_WORKAROUND))
14569                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14570                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14571
14572         return err;
14573 }
14574
14575 #ifdef CONFIG_SPARC
14576 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14577 {
14578         struct net_device *dev = tp->dev;
14579         struct pci_dev *pdev = tp->pdev;
14580         struct device_node *dp = pci_device_to_OF_node(pdev);
14581         const unsigned char *addr;
14582         int len;
14583
14584         addr = of_get_property(dp, "local-mac-address", &len);
14585         if (addr && len == 6) {
14586                 memcpy(dev->dev_addr, addr, 6);
14587                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14588                 return 0;
14589         }
14590         return -ENODEV;
14591 }
14592
14593 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14594 {
14595         struct net_device *dev = tp->dev;
14596
14597         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14598         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14599         return 0;
14600 }
14601 #endif
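
/*
 * Editor's note: the literal 6s above are ETH_ALEN; an equivalent,
 * self-documenting form would be (illustrative):
 *
 *	memcpy(dev->dev_addr, addr, ETH_ALEN);
 *	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
 */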
14602
14603 static int __devinit tg3_get_device_address(struct tg3 *tp)
14604 {
14605         struct net_device *dev = tp->dev;
14606         u32 hi, lo, mac_offset;
14607         int addr_ok = 0;
14608
14609 #ifdef CONFIG_SPARC
14610         if (!tg3_get_macaddr_sparc(tp))
14611                 return 0;
14612 #endif
14613
14614         mac_offset = 0x7c;
14615         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14616             tg3_flag(tp, 5780_CLASS)) {
14617                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14618                         mac_offset = 0xcc;
14619                 if (tg3_nvram_lock(tp))
14620                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14621                 else
14622                         tg3_nvram_unlock(tp);
14623         } else if (tg3_flag(tp, 5717_PLUS)) {
14624                 if (tp->pci_fn & 1)
14625                         mac_offset = 0xcc;
14626                 if (tp->pci_fn > 1)
14627                         mac_offset += 0x18c;
14628         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14629                 mac_offset = 0x10;
14630
14631         /* First try to get it from MAC address mailbox. */
14632         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
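	/* Editor's note: 0x484b is ASCII "HK", the signature the
	 * bootcode leaves in the high mailbox word when a valid MAC
	 * address is present.
	 */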
14633         if ((hi >> 16) == 0x484b) {
14634                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14635                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14636
14637                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14638                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14639                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14640                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14641                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14642
14643                 /* Some old bootcode may report a 0 MAC address in SRAM */
14644                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14645         }
14646         if (!addr_ok) {
14647                 /* Next, try NVRAM. */
14648                 if (!tg3_flag(tp, NO_NVRAM) &&
14649                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14650                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14651                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14652                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14653                 }
14654                 /* Finally just fetch it out of the MAC control regs. */
14655                 else {
14656                         hi = tr32(MAC_ADDR_0_HIGH);
14657                         lo = tr32(MAC_ADDR_0_LOW);
14658
14659                         dev->dev_addr[5] = lo & 0xff;
14660                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14661                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14662                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14663                         dev->dev_addr[1] = hi & 0xff;
14664                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14665                 }
14666         }
14667
14668         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14669 #ifdef CONFIG_SPARC
14670                 if (!tg3_get_default_macaddr_sparc(tp))
14671                         return 0;
14672 #endif
14673                 return -EINVAL;
14674         }
14675         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14676         return 0;
14677 }
14678
14679 #define BOUNDARY_SINGLE_CACHELINE       1
14680 #define BOUNDARY_MULTI_CACHELINE        2
14681
14682 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14683 {
14684         int cacheline_size;
14685         u8 byte;
14686         int goal;
14687
14688         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14689         if (byte == 0)
14690                 cacheline_size = 1024;
14691         else
14692                 cacheline_size = (int) byte * 4;
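	/* PCI_CACHE_LINE_SIZE is in 32-bit dword units, hence the *4;
	 * a value of 0 means firmware never programmed it, so the
	 * worst-case 1024-byte line is assumed above (editor's note).
	 */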
14693
14694         /* On 5703 and later chips, the boundary bits have no
14695          * effect.
14696          */
14697         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14698             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14699             !tg3_flag(tp, PCI_EXPRESS))
14700                 goto out;
14701
14702 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14703         goal = BOUNDARY_MULTI_CACHELINE;
14704 #else
14705 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14706         goal = BOUNDARY_SINGLE_CACHELINE;
14707 #else
14708         goal = 0;
14709 #endif
14710 #endif
14711
14712         if (tg3_flag(tp, 57765_PLUS)) {
14713                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14714                 goto out;
14715         }
14716
14717         if (!goal)
14718                 goto out;
14719
14720         /* PCI controllers on most RISC systems tend to disconnect
14721          * when a device tries to burst across a cache-line boundary.
14722          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14723          *
14724          * Unfortunately, for PCI-E there are only limited
14725          * write-side controls for this, and thus for reads
14726          * we will still get the disconnects.  We'll also waste
14727          * these PCI cycles for both read and write for chips
14728          * other than 5700 and 5701 which do not implement the
14729          * boundary bits.
14730          */
14731         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14732                 switch (cacheline_size) {
14733                 case 16:
14734                 case 32:
14735                 case 64:
14736                 case 128:
14737                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14738                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14739                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14740                         } else {
14741                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14742                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14743                         }
14744                         break;
14745
14746                 case 256:
14747                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14748                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14749                         break;
14750
14751                 default:
14752                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14753                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14754                         break;
14755                 }
14756         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14757                 switch (cacheline_size) {
14758                 case 16:
14759                 case 32:
14760                 case 64:
14761                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14762                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14763                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14764                                 break;
14765                         }
14766                         /* fallthrough */
14767                 case 128:
14768                 default:
14769                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14770                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14771                         break;
14772                 }
14773         } else {
14774                 switch (cacheline_size) {
14775                 case 16:
14776                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14777                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14778                                         DMA_RWCTRL_WRITE_BNDRY_16);
14779                                 break;
14780                         }
14781                         /* fallthrough */
14782                 case 32:
14783                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14784                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14785                                         DMA_RWCTRL_WRITE_BNDRY_32);
14786                                 break;
14787                         }
14788                         /* fallthrough */
14789                 case 64:
14790                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14791                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14792                                         DMA_RWCTRL_WRITE_BNDRY_64);
14793                                 break;
14794                         }
14795                         /* fallthrough */
14796                 case 128:
14797                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14798                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14799                                         DMA_RWCTRL_WRITE_BNDRY_128);
14800                                 break;
14801                         }
14802                         /* fallthrough */
14803                 case 256:
14804                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14805                                 DMA_RWCTRL_WRITE_BNDRY_256);
14806                         break;
14807                 case 512:
14808                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14809                                 DMA_RWCTRL_WRITE_BNDRY_512);
14810                         break;
14811                 case 1024:
14812                 default:
14813                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14814                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14815                         break;
14816                 }
14817         }
14818
14819 out:
14820         return val;
14821 }
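
/*
 * Editor's worked example: on a sparc64 host (goal =
 * BOUNDARY_SINGLE_CACHELINE) with a 64-byte cache line on a PCI-X bus,
 * the switch above ORs in DMA_RWCTRL_READ_BNDRY_128_PCIX |
 * DMA_RWCTRL_WRITE_BNDRY_128_PCIX, so DMA bursts break at 128 bytes.
 */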
14822
14823 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14824 {
14825         struct tg3_internal_buffer_desc test_desc;
14826         u32 sram_dma_descs;
14827         int i, ret;
14828
14829         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14830
14831         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14832         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14833         tw32(RDMAC_STATUS, 0);
14834         tw32(WDMAC_STATUS, 0);
14835
14836         tw32(BUFMGR_MODE, 0);
14837         tw32(FTQ_RESET, 0);
14838
14839         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14840         test_desc.addr_lo = buf_dma & 0xffffffff;
14841         test_desc.nic_mbuf = 0x00002100;
14842         test_desc.len = size;
14843
14844         /*
14845          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14846          * the *second* time the tg3 driver was getting loaded after an
14847          * initial scan.
14848          *
14849          * Broadcom tells me:
14850          *   ...the DMA engine is connected to the GRC block and a DMA
14851          *   reset may affect the GRC block in some unpredictable way...
14852          *   The behavior of resets to individual blocks has not been tested.
14853          *
14854          * Broadcom noted the GRC reset will also reset all sub-components.
14855          */
14856         if (to_device) {
14857                 test_desc.cqid_sqid = (13 << 8) | 2;
14858
14859                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14860                 udelay(40);
14861         } else {
14862                 test_desc.cqid_sqid = (16 << 8) | 7;
14863
14864                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14865                 udelay(40);
14866         }
14867         test_desc.flags = 0x00000005;
14868
14869         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14870                 u32 val;
14871
14872                 val = *(((u32 *)&test_desc) + i);
14873                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14874                                        sram_dma_descs + (i * sizeof(u32)));
14875                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14876         }
14877         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14878
14879         if (to_device)
14880                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14881         else
14882                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14883
14884         ret = -ENODEV;
14885         for (i = 0; i < 40; i++) {
14886                 u32 val;
14887
14888                 if (to_device)
14889                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14890                 else
14891                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14892                 if ((val & 0xffff) == sram_dma_descs) {
14893                         ret = 0;
14894                         break;
14895                 }
14896
14897                 udelay(100);
14898         }
14899
14900         return ret;
14901 }
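
/*
 * Editor's sketch of the indirect SRAM write pattern open-coded in the
 * descriptor loop above: TG3PCI_MEM_WIN_BASE_ADDR selects an offset in
 * NIC memory and TG3PCI_MEM_WIN_DATA carries the data, all via PCI
 * config space.  The helper name is illustrative.
 */
static inline void tg3_example_sram_write(struct tg3 *tp, u32 off, u32 val)
{
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	/* leave the window at offset 0 when done, as the code above does */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}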
14902
14903 #define TEST_BUFFER_SIZE        0x2000
14904
14905 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14906         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14907         { },
14908 };
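/* Editor's note: this table is checked further down in tg3_test_dma() via
 * pci_dev_present(); chipsets listed here are known to expose the 5700/5701
 * write DMA bug even when the test below passes, so a conservative write
 * boundary is forced for them (assumption based on the surrounding code).
 */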
14909
14910 static int __devinit tg3_test_dma(struct tg3 *tp)
14911 {
14912         dma_addr_t buf_dma;
14913         u32 *buf, saved_dma_rwctrl;
14914         int ret = 0;
14915
14916         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14917                                  &buf_dma, GFP_KERNEL);
14918         if (!buf) {
14919                 ret = -ENOMEM;
14920                 goto out_nofree;
14921         }
14922
14923         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14924                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14925
14926         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14927
14928         if (tg3_flag(tp, 57765_PLUS))
14929                 goto out;
14930
14931         if (tg3_flag(tp, PCI_EXPRESS)) {
14932                 /* DMA read watermark not used on PCIE */
14933                 tp->dma_rwctrl |= 0x00180000;
14934         } else if (!tg3_flag(tp, PCIX_MODE)) {
14935                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14936                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14937                         tp->dma_rwctrl |= 0x003f0000;
14938                 else
14939                         tp->dma_rwctrl |= 0x003f000f;
14940         } else {
14941                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14942                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14943                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14944                         u32 read_water = 0x7;
14945
14946                         /* If the 5704 is behind the EPB bridge, we can
14947                          * do the less restrictive ONE_DMA workaround for
14948                          * better performance.
14949                          */
14950                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14951                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14952                                 tp->dma_rwctrl |= 0x8000;
14953                         else if (ccval == 0x6 || ccval == 0x7)
14954                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14955
14956                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14957                                 read_water = 4;
14958                         /* Set bit 23 to enable PCIX hw bug fix */
14959                         tp->dma_rwctrl |=
14960                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14961                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14962                                 (1 << 23);
14963                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14964                         /* 5780 always in PCIX mode */
14965                         tp->dma_rwctrl |= 0x00144000;
14966                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14967                         /* 5714 always in PCIX mode */
14968                         tp->dma_rwctrl |= 0x00148000;
14969                 } else {
14970                         tp->dma_rwctrl |= 0x001b000f;
14971                 }
14972         }
14973
14974         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14975             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14976                 tp->dma_rwctrl &= 0xfffffff0;
14977
14978         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14979             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14980                 /* Remove this if it causes problems for some boards. */
14981                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14982
14983                 /* On 5700/5701 chips, we need to set this bit.
14984                  * Otherwise the chip will issue cacheline transactions
14985                  * to streamable DMA memory with not all the byte
14986                  * enables turned on.  This is an error on several
14987                  * RISC PCI controllers, in particular sparc64.
14988                  *
14989                  * On 5703/5704 chips, this bit has been reassigned
14990                  * a different meaning.  In particular, it is used
14991                  * on those chips to enable a PCI-X workaround.
14992                  */
14993                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14994         }
14995
14996         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14997
14998 #if 0
14999         /* Unneeded, already done by tg3_get_invariants.  */
15000         tg3_switch_clocks(tp);
15001 #endif
15002
15003         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15004             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15005                 goto out;
15006
15007         /* It is best to perform the DMA test with the maximum write
15008          * burst size to expose the 5700/5701 write DMA bug.
15009          */
15010         saved_dma_rwctrl = tp->dma_rwctrl;
15011         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15012         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15013
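        /* Write an incrementing pattern, DMA it to the chip and back, and
         * verify it.  On a mismatch the test retries once with the write
         * boundary forced to 16 bytes; a mismatch after that is fatal.
         */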
15014         while (1) {
15015                 u32 *p = buf, i;
15016
15017                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15018                         p[i] = i;
15019
15020                 /* Send the buffer to the chip. */
15021                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15022                 if (ret) {
15023                         dev_err(&tp->pdev->dev,
15024                                 "%s: Buffer write failed. err = %d\n",
15025                                 __func__, ret);
15026                         break;
15027                 }
15028
15029 #if 0
15030                 /* Validate that the data reached card RAM correctly. */
15031                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15032                         u32 val;
15033                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15034                         if (le32_to_cpu(val) != p[i]) {
15035                                 dev_err(&tp->pdev->dev,
15036                                         "%s: Buffer corrupted on device! "
15037                                         "(%u != %u)\n", __func__,
15038                                         le32_to_cpu(val), i);
15038                                 /* ret = -ENODEV here? */
15039                         }
15040                         p[i] = 0;
15041                 }
15042 #endif
15043                 /* Now read it back. */
15044                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15045                 if (ret) {
15046                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15047                                 "err = %d\n", __func__, ret);
15048                         break;
15049                 }
15050
15051                 /* Verify it. */
15052                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15053                         if (p[i] == i)
15054                                 continue;
15055
15056                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15057                             DMA_RWCTRL_WRITE_BNDRY_16) {
15058                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15059                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15060                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15061                                 break;
15062                         } else {
15063                                 dev_err(&tp->pdev->dev,
15064                                         "%s: Buffer corrupted on read back! "
15065                                         "(%d != %d)\n", __func__, p[i], i);
15066                                 ret = -ENODEV;
15067                                 goto out;
15068                         }
15069                 }
15070
15071                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15072                         /* Success. */
15073                         ret = 0;
15074                         break;
15075                 }
15076         }
15077         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15078             DMA_RWCTRL_WRITE_BNDRY_16) {
15079                 /* DMA test passed without adjusting DMA boundary,
15080                  * now look for chipsets that are known to expose the
15081                  * DMA bug without failing the test.
15082                  */
15083                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15084                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15085                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15086                 } else {
15087                         /* Safe to use the calculated DMA boundary. */
15088                         tp->dma_rwctrl = saved_dma_rwctrl;
15089                 }
15090
15091                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15092         }
15093
15094 out:
15095         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15096 out_nofree:
15097         return ret;
15098 }
15099
15100 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15101 {
15102         if (tg3_flag(tp, 57765_PLUS)) {
15103                 tp->bufmgr_config.mbuf_read_dma_low_water =
15104                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15105                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15106                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15107                 tp->bufmgr_config.mbuf_high_water =
15108                         DEFAULT_MB_HIGH_WATER_57765;
15109
15110                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15111                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15112                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15113                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15114                 tp->bufmgr_config.mbuf_high_water_jumbo =
15115                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15116         } else if (tg3_flag(tp, 5705_PLUS)) {
15117                 tp->bufmgr_config.mbuf_read_dma_low_water =
15118                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15119                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15120                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15121                 tp->bufmgr_config.mbuf_high_water =
15122                         DEFAULT_MB_HIGH_WATER_5705;
15123                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15124                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15125                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15126                         tp->bufmgr_config.mbuf_high_water =
15127                                 DEFAULT_MB_HIGH_WATER_5906;
15128                 }
15129
15130                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15131                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15132                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15133                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15134                 tp->bufmgr_config.mbuf_high_water_jumbo =
15135                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15136         } else {
15137                 tp->bufmgr_config.mbuf_read_dma_low_water =
15138                         DEFAULT_MB_RDMA_LOW_WATER;
15139                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15140                         DEFAULT_MB_MACRX_LOW_WATER;
15141                 tp->bufmgr_config.mbuf_high_water =
15142                         DEFAULT_MB_HIGH_WATER;
15143
15144                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15145                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15146                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15147                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15148                 tp->bufmgr_config.mbuf_high_water_jumbo =
15149                         DEFAULT_MB_HIGH_WATER_JUMBO;
15150         }
15151
15152         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15153         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15154 }
15155
15156 static char * __devinit tg3_phy_string(struct tg3 *tp)
15157 {
15158         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15159         case TG3_PHY_ID_BCM5400:        return "5400";
15160         case TG3_PHY_ID_BCM5401:        return "5401";
15161         case TG3_PHY_ID_BCM5411:        return "5411";
15162         case TG3_PHY_ID_BCM5701:        return "5701";
15163         case TG3_PHY_ID_BCM5703:        return "5703";
15164         case TG3_PHY_ID_BCM5704:        return "5704";
15165         case TG3_PHY_ID_BCM5705:        return "5705";
15166         case TG3_PHY_ID_BCM5750:        return "5750";
15167         case TG3_PHY_ID_BCM5752:        return "5752";
15168         case TG3_PHY_ID_BCM5714:        return "5714";
15169         case TG3_PHY_ID_BCM5780:        return "5780";
15170         case TG3_PHY_ID_BCM5755:        return "5755";
15171         case TG3_PHY_ID_BCM5787:        return "5787";
15172         case TG3_PHY_ID_BCM5784:        return "5784";
15173         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15174         case TG3_PHY_ID_BCM5906:        return "5906";
15175         case TG3_PHY_ID_BCM5761:        return "5761";
15176         case TG3_PHY_ID_BCM5718C:       return "5718C";
15177         case TG3_PHY_ID_BCM5718S:       return "5718S";
15178         case TG3_PHY_ID_BCM57765:       return "57765";
15179         case TG3_PHY_ID_BCM5719C:       return "5719C";
15180         case TG3_PHY_ID_BCM5720C:       return "5720C";
15181         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15182         case 0:                 return "serdes";
15183         default:                return "unknown";
15184         }
15185 }
15186
15187 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15188 {
15189         if (tg3_flag(tp, PCI_EXPRESS)) {
15190                 strcpy(str, "PCI Express");
15191                 return str;
15192         } else if (tg3_flag(tp, PCIX_MODE)) {
15193                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15194
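                /* The low 5 bits of TG3PCI_CLOCK_CTRL encode the PCI-X bus
                 * speed: 0 -> 33MHz, 2 -> 50MHz, 4 -> 66MHz, 6 -> 100MHz and
                 * 7 -> 133MHz (133MHz is also implied by the 5704CIOBE
                 * board ID).
                 */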
15195                 strcpy(str, "PCIX:");
15196
15197                 if ((clock_ctrl == 7) ||
15198                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15199                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15200                         strcat(str, "133MHz");
15201                 else if (clock_ctrl == 0)
15202                         strcat(str, "33MHz");
15203                 else if (clock_ctrl == 2)
15204                         strcat(str, "50MHz");
15205                 else if (clock_ctrl == 4)
15206                         strcat(str, "66MHz");
15207                 else if (clock_ctrl == 6)
15208                         strcat(str, "100MHz");
15209         } else {
15210                 strcpy(str, "PCI:");
15211                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15212                         strcat(str, "66MHz");
15213                 else
15214                         strcat(str, "33MHz");
15215         }
15216         if (tg3_flag(tp, PCI_32BIT))
15217                 strcat(str, ":32-bit");
15218         else
15219                 strcat(str, ":64-bit");
15220         return str;
15221 }
15222
15223 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15224 {
15225         struct pci_dev *peer;
15226         unsigned int func, devnr = tp->pdev->devfn & ~7;
15227
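        /* devfn packs the PCI slot and function number; per <linux/pci.h>:
         *
         *   PCI_SLOT(devfn) == ((devfn) >> 3) & 0x1f
         *   PCI_FUNC(devfn) == (devfn) & 0x07
         *
         * so devfn & ~7 is function 0 of this device's slot.  E.g. a devfn
         * of 0x0a (slot 1, function 2) makes the loop below scan 0x08-0x0f.
         */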
15228         for (func = 0; func < 8; func++) {
15229                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15230                 if (peer && peer != tp->pdev)
15231                         break;
15232                 pci_dev_put(peer);
15233         }
15234         /* The 5704 can be configured in single-port mode; set peer to
15235          * tp->pdev in that case.
15236          */
15237         if (!peer) {
15238                 peer = tp->pdev;
15239                 return peer;
15240         }
15241
15242         /*
15243          * We don't need to keep the refcount elevated; there's no way
15244          * to remove one half of this device without removing the other.
15245          */
15246         pci_dev_put(peer);
15247
15248         return peer;
15249 }
15250
15251 static void __devinit tg3_init_coal(struct tg3 *tp)
15252 {
15253         struct ethtool_coalesce *ec = &tp->coal;
15254
15255         memset(ec, 0, sizeof(*ec));
15256         ec->cmd = ETHTOOL_GCOALESCE;
15257         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15258         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15259         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15260         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15261         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15262         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15263         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15264         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15265         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15266
15267         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15268                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15269                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15270                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15271                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15272                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15273         }
15274
15275         if (tg3_flag(tp, 5705_PLUS)) {
15276                 ec->rx_coalesce_usecs_irq = 0;
15277                 ec->tx_coalesce_usecs_irq = 0;
15278                 ec->stats_block_coalesce_usecs = 0;
15279         }
15280 }
15281
15282 static const struct net_device_ops tg3_netdev_ops = {
15283         .ndo_open               = tg3_open,
15284         .ndo_stop               = tg3_close,
15285         .ndo_start_xmit         = tg3_start_xmit,
15286         .ndo_get_stats64        = tg3_get_stats64,
15287         .ndo_validate_addr      = eth_validate_addr,
15288         .ndo_set_rx_mode        = tg3_set_rx_mode,
15289         .ndo_set_mac_address    = tg3_set_mac_addr,
15290         .ndo_do_ioctl           = tg3_ioctl,
15291         .ndo_tx_timeout         = tg3_tx_timeout,
15292         .ndo_change_mtu         = tg3_change_mtu,
15293         .ndo_fix_features       = tg3_fix_features,
15294         .ndo_set_features       = tg3_set_features,
15295 #ifdef CONFIG_NET_POLL_CONTROLLER
15296         .ndo_poll_controller    = tg3_poll_controller,
15297 #endif
15298 };
15299
15300 static int __devinit tg3_init_one(struct pci_dev *pdev,
15301                                   const struct pci_device_id *ent)
15302 {
15303         struct net_device *dev;
15304         struct tg3 *tp;
15305         int i, err, pm_cap;
15306         u32 sndmbx, rcvmbx, intmbx;
15307         char str[40];
15308         u64 dma_mask, persist_dma_mask;
15309         u32 features = 0;
15310
15311         printk_once(KERN_INFO "%s\n", version);
15312
15313         err = pci_enable_device(pdev);
15314         if (err) {
15315                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15316                 return err;
15317         }
15318
15319         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15320         if (err) {
15321                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15322                 goto err_out_disable_pdev;
15323         }
15324
15325         pci_set_master(pdev);
15326
15327         /* Find power-management capability. */
15328         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15329         if (pm_cap == 0) {
15330                 dev_err(&pdev->dev,
15331                         "Cannot find Power Management capability, aborting\n");
15332                 err = -EIO;
15333                 goto err_out_free_res;
15334         }
15335
15336         err = pci_set_power_state(pdev, PCI_D0);
15337         if (err) {
15338                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15339                 goto err_out_free_res;
15340         }
15341
15342         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15343         if (!dev) {
15344                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15345                 err = -ENOMEM;
15346                 goto err_out_power_down;
15347         }
15348
15349         SET_NETDEV_DEV(dev, &pdev->dev);
15350
15351         tp = netdev_priv(dev);
15352         tp->pdev = pdev;
15353         tp->dev = dev;
15354         tp->pm_cap = pm_cap;
15355         tp->rx_mode = TG3_DEF_RX_MODE;
15356         tp->tx_mode = TG3_DEF_TX_MODE;
15357
15358         if (tg3_debug > 0)
15359                 tp->msg_enable = tg3_debug;
15360         else
15361                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15362
15363         /* The word/byte swap controls here govern register access byte
15364          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15365          * setting below.
15366          */
15367         tp->misc_host_ctrl =
15368                 MISC_HOST_CTRL_MASK_PCI_INT |
15369                 MISC_HOST_CTRL_WORD_SWAP |
15370                 MISC_HOST_CTRL_INDIR_ACCESS |
15371                 MISC_HOST_CTRL_PCISTATE_RW;
15372
15373         /* The NONFRM (non-frame) byte/word swap controls take effect
15374          * on descriptor entries, i.e. anything that isn't packet data.
15375          *
15376          * The StrongARM chips on the board (one for tx, one for rx)
15377          * are running in big-endian mode.
15378          */
15379         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15380                         GRC_MODE_WSWAP_NONFRM_DATA);
15381 #ifdef __BIG_ENDIAN
15382         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15383 #endif
15384         spin_lock_init(&tp->lock);
15385         spin_lock_init(&tp->indirect_lock);
15386         INIT_WORK(&tp->reset_task, tg3_reset_task);
15387
15388         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15389         if (!tp->regs) {
15390                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15391                 err = -ENOMEM;
15392                 goto err_out_free_dev;
15393         }
15394
15395         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15396             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15397             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15398             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15399             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15400             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15401             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15402             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15403                 tg3_flag_set(tp, ENABLE_APE);
15404                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15405                 if (!tp->aperegs) {
15406                         dev_err(&pdev->dev,
15407                                 "Cannot map APE registers, aborting\n");
15408                         err = -ENOMEM;
15409                         goto err_out_iounmap;
15410                 }
15411         }
15412
15413         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15414         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15415
15416         dev->ethtool_ops = &tg3_ethtool_ops;
15417         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15418         dev->netdev_ops = &tg3_netdev_ops;
15419         dev->irq = pdev->irq;
15420
15421         err = tg3_get_invariants(tp);
15422         if (err) {
15423                 dev_err(&pdev->dev,
15424                         "Problem fetching invariants of chip, aborting\n");
15425                 goto err_out_apeunmap;
15426         }
15427
15428         /* The EPB bridge inside 5714, 5715, and 5780 and any
15429          * device behind the EPB cannot support DMA addresses > 40-bit.
15430          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15431          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15432          * do DMA address check in tg3_start_xmit().
15433          */
15434         if (tg3_flag(tp, IS_5788))
15435                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15436         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15437                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15438 #ifdef CONFIG_HIGHMEM
15439                 dma_mask = DMA_BIT_MASK(64);
15440 #endif
15441         } else
15442                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15443
15444         /* Configure DMA attributes. */
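        /* Try the wide streaming mask first.  The coherent (persistent)
         * mask must also be satisfiable or we abort; if the wide streaming
         * mask is rejected, fall back to 32-bit DMA.
         */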
15445         if (dma_mask > DMA_BIT_MASK(32)) {
15446                 err = pci_set_dma_mask(pdev, dma_mask);
15447                 if (!err) {
15448                         features |= NETIF_F_HIGHDMA;
15449                         err = pci_set_consistent_dma_mask(pdev,
15450                                                           persist_dma_mask);
15451                         if (err < 0) {
15452                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15453                                         "DMA for consistent allocations\n");
15454                                 goto err_out_apeunmap;
15455                         }
15456                 }
15457         }
15458         if (err || dma_mask == DMA_BIT_MASK(32)) {
15459                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15460                 if (err) {
15461                         dev_err(&pdev->dev,
15462                                 "No usable DMA configuration, aborting\n");
15463                         goto err_out_apeunmap;
15464                 }
15465         }
15466
15467         tg3_init_bufmgr_config(tp);
15468
15469         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15470
15471         /* 5700 B0 chips do not support checksumming correctly due
15472          * to hardware bugs.
15473          */
15474         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15475                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15476
15477                 if (tg3_flag(tp, 5755_PLUS))
15478                         features |= NETIF_F_IPV6_CSUM;
15479         }
15480
15481         /* TSO is on by default on chips that support hardware TSO.
15482          * Firmware TSO on older chips gives lower performance, so it
15483          * is off by default, but can be enabled using ethtool.
15484          */
15485         if ((tg3_flag(tp, HW_TSO_1) ||
15486              tg3_flag(tp, HW_TSO_2) ||
15487              tg3_flag(tp, HW_TSO_3)) &&
15488             (features & NETIF_F_IP_CSUM))
15489                 features |= NETIF_F_TSO;
15490         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15491                 if (features & NETIF_F_IPV6_CSUM)
15492                         features |= NETIF_F_TSO6;
15493                 if (tg3_flag(tp, HW_TSO_3) ||
15494                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15495                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15496                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15497                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15498                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15499                         features |= NETIF_F_TSO_ECN;
15500         }
15501
15502         dev->features |= features;
15503         dev->vlan_features |= features;
15504
15505         /*
15506          * Add loopback capability only for a subset of devices that support
15507          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15508          * loopback for the remaining devices.
15509          */
15510         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15511             !tg3_flag(tp, CPMU_PRESENT))
15512                 /* Add the loopback capability */
15513                 features |= NETIF_F_LOOPBACK;
15514
15515         dev->hw_features |= features;
15516
15517         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15518             !tg3_flag(tp, TSO_CAPABLE) &&
15519             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15520                 tg3_flag_set(tp, MAX_RXPEND_64);
15521                 tp->rx_pending = 63;
15522         }
15523
15524         err = tg3_get_device_address(tp);
15525         if (err) {
15526                 dev_err(&pdev->dev,
15527                         "Could not obtain valid ethernet address, aborting\n");
15528                 goto err_out_apeunmap;
15529         }
15530
15531         /*
15532          * Reset chip in case UNDI or EFI driver did not shut it down.
15533          * Otherwise the DMA self test will enable WDMAC and we'll see
15534          * (spurious) pending DMA on the PCI bus at that point.
15535          */
15536         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15537             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15538                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15539                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15540         }
15541
15542         err = tg3_test_dma(tp);
15543         if (err) {
15544                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15545                 goto err_out_apeunmap;
15546         }
15547
15548         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15549         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15550         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
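        /* Walk the per-vector mailboxes.  Interrupt mailboxes for vectors
         * 0-4 sit 8 bytes apart and later ones at a 4-byte stride;
         * receive-return mailboxes advance 8 bytes per vector, and the send
         * producer mailboxes zig-zag between the halves of successive
         * 8-byte registers (the -0x4/+0xc dance below, net +0x8 every two
         * vectors).
         */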
15551         for (i = 0; i < tp->irq_max; i++) {
15552                 struct tg3_napi *tnapi = &tp->napi[i];
15553
15554                 tnapi->tp = tp;
15555                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15556
15557                 tnapi->int_mbox = intmbx;
15558                 if (i <= 4)
15559                         intmbx += 0x8;
15560                 else
15561                         intmbx += 0x4;
15562
15563                 tnapi->consmbox = rcvmbx;
15564                 tnapi->prodmbox = sndmbx;
15565
15566                 if (i)
15567                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15568                 else
15569                         tnapi->coal_now = HOSTCC_MODE_NOW;
15570
15571                 if (!tg3_flag(tp, SUPPORT_MSIX))
15572                         break;
15573
15574                 /*
15575                  * If we support MSIX, we'll be using RSS.  If we're using
15576                  * RSS, the first vector only handles link interrupts and the
15577                  * remaining vectors handle rx and tx interrupts.  Reuse the
15578                  * mailbox values for the next iteration.  The values set up
15579                  * above are still useful for the single-vectored mode.
15580                  */
15581                 if (!i)
15582                         continue;
15583
15584                 rcvmbx += 0x8;
15585
15586                 if (sndmbx & 0x4)
15587                         sndmbx -= 0x4;
15588                 else
15589                         sndmbx += 0xc;
15590         }
15591
15592         tg3_init_coal(tp);
15593
15594         pci_set_drvdata(pdev, dev);
15595
15596         if (tg3_flag(tp, 5717_PLUS)) {
15597                 /* Resume a low-power mode */
15598                 tg3_frob_aux_power(tp, false);
15599         }
15600
15601         err = register_netdev(dev);
15602         if (err) {
15603                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15604                 goto err_out_apeunmap;
15605         }
15606
15607         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15608                     tp->board_part_number,
15609                     tp->pci_chip_rev_id,
15610                     tg3_bus_string(tp, str),
15611                     dev->dev_addr);
15612
15613         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15614                 struct phy_device *phydev;
15615                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15616                 netdev_info(dev,
15617                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15618                             phydev->drv->name, dev_name(&phydev->dev));
15619         } else {
15620                 char *ethtype;
15621
15622                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15623                         ethtype = "10/100Base-TX";
15624                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15625                         ethtype = "1000Base-SX";
15626                 else
15627                         ethtype = "10/100/1000Base-T";
15628
15629                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15630                             "(WireSpeed[%d], EEE[%d])\n",
15631                             tg3_phy_string(tp), ethtype,
15632                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15633                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15634         }
15635
15636         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15637                     (dev->features & NETIF_F_RXCSUM) != 0,
15638                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15639                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15640                     tg3_flag(tp, ENABLE_ASF) != 0,
15641                     tg3_flag(tp, TSO_CAPABLE) != 0);
15642         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15643                     tp->dma_rwctrl,
15644                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15645                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15646
15647         pci_save_state(pdev);
15648
15649         return 0;
15650
15651 err_out_apeunmap:
15652         if (tp->aperegs) {
15653                 iounmap(tp->aperegs);
15654                 tp->aperegs = NULL;
15655         }
15656
15657 err_out_iounmap:
15658         if (tp->regs) {
15659                 iounmap(tp->regs);
15660                 tp->regs = NULL;
15661         }
15662
15663 err_out_free_dev:
15664         free_netdev(dev);
15665
15666 err_out_power_down:
15667         pci_set_power_state(pdev, PCI_D3hot);
15668
15669 err_out_free_res:
15670         pci_release_regions(pdev);
15671
15672 err_out_disable_pdev:
15673         pci_disable_device(pdev);
15674         pci_set_drvdata(pdev, NULL);
15675         return err;
15676 }
15677
15678 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15679 {
15680         struct net_device *dev = pci_get_drvdata(pdev);
15681
15682         if (dev) {
15683                 struct tg3 *tp = netdev_priv(dev);
15684
15685                 if (tp->fw)
15686                         release_firmware(tp->fw);
15687
15688                 cancel_work_sync(&tp->reset_task);
15689
15690                 if (tg3_flag(tp, USE_PHYLIB)) {
15691                         tg3_phy_fini(tp);
15692                         tg3_mdio_fini(tp);
15693                 }
15694
15695                 unregister_netdev(dev);
15696                 if (tp->aperegs) {
15697                         iounmap(tp->aperegs);
15698                         tp->aperegs = NULL;
15699                 }
15700                 if (tp->regs) {
15701                         iounmap(tp->regs);
15702                         tp->regs = NULL;
15703                 }
15704                 free_netdev(dev);
15705                 pci_release_regions(pdev);
15706                 pci_disable_device(pdev);
15707                 pci_set_drvdata(pdev, NULL);
15708         }
15709 }
15710
15711 #ifdef CONFIG_PM_SLEEP
15712 static int tg3_suspend(struct device *device)
15713 {
15714         struct pci_dev *pdev = to_pci_dev(device);
15715         struct net_device *dev = pci_get_drvdata(pdev);
15716         struct tg3 *tp = netdev_priv(dev);
15717         int err;
15718
15719         if (!netif_running(dev))
15720                 return 0;
15721
15722         flush_work_sync(&tp->reset_task);
15723         tg3_phy_stop(tp);
15724         tg3_netif_stop(tp);
15725
15726         del_timer_sync(&tp->timer);
15727
15728         tg3_full_lock(tp, 1);
15729         tg3_disable_ints(tp);
15730         tg3_full_unlock(tp);
15731
15732         netif_device_detach(dev);
15733
15734         tg3_full_lock(tp, 0);
15735         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15736         tg3_flag_clear(tp, INIT_COMPLETE);
15737         tg3_full_unlock(tp);
15738
15739         err = tg3_power_down_prepare(tp);
15740         if (err) {
15741                 int err2;
15742
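                /* Power-down preparation failed: restart the hardware and
                 * resume normal operation so the device is not left dead.
                 */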
15743                 tg3_full_lock(tp, 0);
15744
15745                 tg3_flag_set(tp, INIT_COMPLETE);
15746                 err2 = tg3_restart_hw(tp, 1);
15747                 if (err2)
15748                         goto out;
15749
15750                 tp->timer.expires = jiffies + tp->timer_offset;
15751                 add_timer(&tp->timer);
15752
15753                 netif_device_attach(dev);
15754                 tg3_netif_start(tp);
15755
15756 out:
15757                 tg3_full_unlock(tp);
15758
15759                 if (!err2)
15760                         tg3_phy_start(tp);
15761         }
15762
15763         return err;
15764 }
15765
15766 static int tg3_resume(struct device *device)
15767 {
15768         struct pci_dev *pdev = to_pci_dev(device);
15769         struct net_device *dev = pci_get_drvdata(pdev);
15770         struct tg3 *tp = netdev_priv(dev);
15771         int err;
15772
15773         if (!netif_running(dev))
15774                 return 0;
15775
15776         netif_device_attach(dev);
15777
15778         tg3_full_lock(tp, 0);
15779
15780         tg3_flag_set(tp, INIT_COMPLETE);
15781         err = tg3_restart_hw(tp, 1);
15782         if (err)
15783                 goto out;
15784
15785         tp->timer.expires = jiffies + tp->timer_offset;
15786         add_timer(&tp->timer);
15787
15788         tg3_netif_start(tp);
15789
15790 out:
15791         tg3_full_unlock(tp);
15792
15793         if (!err)
15794                 tg3_phy_start(tp);
15795
15796         return err;
15797 }
15798
15799 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15800 #define TG3_PM_OPS (&tg3_pm_ops)
15801
15802 #else
15803
15804 #define TG3_PM_OPS NULL
15805
15806 #endif /* CONFIG_PM_SLEEP */
15807
15808 /**
15809  * tg3_io_error_detected - called when PCI error is detected
15810  * @pdev: Pointer to PCI device
15811  * @state: The current PCI connection state
15812  *
15813  * This function is called after a PCI bus error affecting
15814  * this device has been detected.
15815  */
15816 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15817                                               pci_channel_state_t state)
15818 {
15819         struct net_device *netdev = pci_get_drvdata(pdev);
15820         struct tg3 *tp = netdev_priv(netdev);
15821         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15822
15823         netdev_info(netdev, "PCI I/O error detected\n");
15824
15825         rtnl_lock();
15826
15827         if (!netif_running(netdev))
15828                 goto done;
15829
15830         tg3_phy_stop(tp);
15831
15832         tg3_netif_stop(tp);
15833
15834         del_timer_sync(&tp->timer);
15835         tg3_flag_clear(tp, RESTART_TIMER);
15836
15837         /* Make sure that the reset task doesn't run */
15838         cancel_work_sync(&tp->reset_task);
15839         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15840         tg3_flag_clear(tp, RESTART_TIMER);
15841
15842         netif_device_detach(netdev);
15843
15844         /* Clean up software state, even if MMIO is blocked */
15845         tg3_full_lock(tp, 0);
15846         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15847         tg3_full_unlock(tp);
15848
15849 done:
15850         if (state == pci_channel_io_perm_failure)
15851                 err = PCI_ERS_RESULT_DISCONNECT;
15852         else
15853                 pci_disable_device(pdev);
15854
15855         rtnl_unlock();
15856
15857         return err;
15858 }
15859
15860 /**
15861  * tg3_io_slot_reset - called after the pci bus has been reset.
15862  * tg3_io_slot_reset - called after the PCI bus has been reset.
15863  *
15864  * Restart the card from scratch, as if from a cold boot.
15865  * At this point, the card has experienced a hard reset,
15866  * followed by fixups by BIOS, and has its config space
15867  * set up identically to what it was at cold boot.
15868  */
15869 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15870 {
15871         struct net_device *netdev = pci_get_drvdata(pdev);
15872         struct tg3 *tp = netdev_priv(netdev);
15873         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15874         int err;
15875
15876         rtnl_lock();
15877
15878         if (pci_enable_device(pdev)) {
15879                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15880                 goto done;
15881         }
15882
15883         pci_set_master(pdev);
15884         pci_restore_state(pdev);
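        /* Save state again: pci_restore_state() marks the saved copy as
         * consumed, and a later slot reset will need valid saved state
         * to restore.
         */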
15885         pci_save_state(pdev);
15886
15887         if (!netif_running(netdev)) {
15888                 rc = PCI_ERS_RESULT_RECOVERED;
15889                 goto done;
15890         }
15891
15892         err = tg3_power_up(tp);
15893         if (err)
15894                 goto done;
15895
15896         rc = PCI_ERS_RESULT_RECOVERED;
15897
15898 done:
15899         rtnl_unlock();
15900
15901         return rc;
15902 }
15903
15904 /**
15905  * tg3_io_resume - called when traffic can start flowing again.
15906  * @pdev: Pointer to PCI device
15907  *
15908  * This callback is called when the error recovery driver tells
15909  * us that it's OK to resume normal operation.
15910  */
15911 static void tg3_io_resume(struct pci_dev *pdev)
15912 {
15913         struct net_device *netdev = pci_get_drvdata(pdev);
15914         struct tg3 *tp = netdev_priv(netdev);
15915         int err;
15916
15917         rtnl_lock();
15918
15919         if (!netif_running(netdev))
15920                 goto done;
15921
15922         tg3_full_lock(tp, 0);
15923         tg3_flag_set(tp, INIT_COMPLETE);
15924         err = tg3_restart_hw(tp, 1);
15925         tg3_full_unlock(tp);
15926         if (err) {
15927                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15928                 goto done;
15929         }
15930
15931         netif_device_attach(netdev);
15932
15933         tp->timer.expires = jiffies + tp->timer_offset;
15934         add_timer(&tp->timer);
15935
15936         tg3_netif_start(tp);
15937
15938         tg3_phy_start(tp);
15939
15940 done:
15941         rtnl_unlock();
15942 }
15943
15944 static struct pci_error_handlers tg3_err_handler = {
15945         .error_detected = tg3_io_error_detected,
15946         .slot_reset     = tg3_io_slot_reset,
15947         .resume         = tg3_io_resume
15948 };
15949
15950 static struct pci_driver tg3_driver = {
15951         .name           = DRV_MODULE_NAME,
15952         .id_table       = tg3_pci_tbl,
15953         .probe          = tg3_init_one,
15954         .remove         = __devexit_p(tg3_remove_one),
15955         .err_handler    = &tg3_err_handler,
15956         .driver.pm      = TG3_PM_OPS,
15957 };
15958
15959 static int __init tg3_init(void)
15960 {
15961         return pci_register_driver(&tg3_driver);
15962 }
15963
15964 static void __exit tg3_cleanup(void)
15965 {
15966         pci_unregister_driver(&tg3_driver);
15967 }
15968
15969 module_init(tg3_init);
15970 module_exit(tg3_cleanup);