tg3: Add partial fragment unmapping code
drivers/net/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
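
/* Illustrative usage sketch (not part of the driver): the helpers above
 * give type-checked access to the tp->tg3_flags bitmap, e.g.
 *
 *	if (tg3_flag(tp, TAGGED_STATUS))
 *		tg3_flag_clear(tp, TAGGED_STATUS);
 *
 * TG3_FLAG_TAGGED_STATUS is a real enum TG3_FLAGS member used later in
 * this file; the snippet only demonstrates the calling convention.
 */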

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			119
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
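
/* Worked example of the mask trick described above: TG3_TX_RING_SIZE is
 * 512, a power of two, so NEXT_TX(511) == (511 + 1) & 511 == 0 and the
 * producer index wraps to the start of the ring without a hardware
 * divide, while NEXT_TX(100) is simply 101.
 */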

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
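
/* Sketch of how the threshold above is typically consulted on receive.
 * This is a hypothetical helper for illustration only; the real decision
 * is made inline in the rx path later in this file:
 *
 *	static bool tg3_rx_should_copy(struct tg3 *tp, unsigned int len)
 *	{
 *		return len <= TG3_RX_COPY_THRESH(tp);
 *	}
 *
 * Frames at or below the threshold are copied into a fresh skb so the
 * DMA buffer can be recycled in place; larger frames are passed up.
 */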

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
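
/* Usage sketch for the accessor macros above (illustrative only):
 *
 *	tw32(GRC_MODE, val);			// posted write, no read-back
 *	tw32_f(GRC_MODE, val);			// write, then flush with a read
 *	tw32_wait_f(GRC_LOCAL_CTRL, val, 100);	// flush and honor a 100 usec wait
 *	val = tr32(GRC_MODE);			// plain register read
 *
 * The _f and _wait_f forms route through _tw32_flush() so posted PCI
 * writes are forced out before timing-sensitive code continues.
 */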

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = 0; i < 8; i++) {
		if (i == TG3_APE_LOCK_GPIO)
			continue;
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
	}

	/* Clear the correct bit of the GPIO lock too. */
	if (!tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_REQ_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
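
/* Typical pairing with tg3_ape_unlock() below (illustrative sketch):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;		// -EBUSY: the APE firmware holds the lock
 *	... access the region shared with the management firmware ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * The lock arbitrates between the driver and the APE management
 * firmware; the polling loop above bounds the wait to about 1 ms.
 */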

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
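
/* Example of driving the MI interface through the helper above
 * (illustrative; MII_PHYSID1 comes from <linux/mii.h>):
 *
 *	u32 id;
 *	if (!tg3_readphy(tp, MII_PHYSID1, &id))
 *		pr_debug("PHY ID high word: 0x%04x\n", id);
 *
 * A non-zero return means MI_COM_BUSY never cleared within
 * PHY_BUSY_LOOPS polls and the read timed out with -EBUSY.
 */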

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
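
/* Back-of-the-envelope for the loop above: delay_cnt is capped at
 * TG3_FW_EVENT_TIMEOUT_USEC (2500) and scaled to (2500 >> 3) + 1 = 313
 * iterations of udelay(8), i.e. roughly the same 2.5 ms budget spent
 * polling the event bit in 8 usec steps.
 */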

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
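
/* The resolution above follows the IEEE 802.3 Annex 28B pause pairing.
 * As a quick reference (local advert / link partner -> result):
 *
 *	PAUSE+ASYM / PAUSE	-> TX+RX
 *	PAUSE+ASYM / ASYM only	-> RX only
 *	PAUSE only / PAUSE	-> TX+RX
 *	ASYM only  / PAUSE+ASYM	-> TX only
 *	anything else		-> no pause
 */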
1472
1473 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1474 {
1475         u8 autoneg;
1476         u8 flowctrl = 0;
1477         u32 old_rx_mode = tp->rx_mode;
1478         u32 old_tx_mode = tp->tx_mode;
1479
1480         if (tg3_flag(tp, USE_PHYLIB))
1481                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1482         else
1483                 autoneg = tp->link_config.autoneg;
1484
1485         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1486                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1487                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1488                 else
1489                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1490         } else
1491                 flowctrl = tp->link_config.flowctrl;
1492
1493         tp->link_config.active_flowctrl = flowctrl;
1494
1495         if (flowctrl & FLOW_CTRL_RX)
1496                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1497         else
1498                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1499
1500         if (old_rx_mode != tp->rx_mode)
1501                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1502
1503         if (flowctrl & FLOW_CTRL_TX)
1504                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1505         else
1506                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1507
1508         if (old_tx_mode != tp->tx_mode)
1509                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1510 }
1511
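/* phylib adjust_link callback: mirror the PHY's speed/duplex/pause
 * state into the MAC mode and TX length registers, then log changes.
 */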
1512 static void tg3_adjust_link(struct net_device *dev)
1513 {
1514         u8 oldflowctrl, linkmesg = 0;
1515         u32 mac_mode, lcl_adv, rmt_adv;
1516         struct tg3 *tp = netdev_priv(dev);
1517         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1518
1519         spin_lock_bh(&tp->lock);
1520
1521         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1522                                     MAC_MODE_HALF_DUPLEX);
1523
1524         oldflowctrl = tp->link_config.active_flowctrl;
1525
1526         if (phydev->link) {
1527                 lcl_adv = 0;
1528                 rmt_adv = 0;
1529
1530                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1531                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1532                 else if (phydev->speed == SPEED_1000 ||
1533                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1534                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1535                 else
1536                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1537
1538                 if (phydev->duplex == DUPLEX_HALF)
1539                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1540                 else {
1541                         lcl_adv = tg3_advert_flowctrl_1000T(
1542                                   tp->link_config.flowctrl);
1543
1544                         if (phydev->pause)
1545                                 rmt_adv = LPA_PAUSE_CAP;
1546                         if (phydev->asym_pause)
1547                                 rmt_adv |= LPA_PAUSE_ASYM;
1548                 }
1549
1550                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1551         } else
1552                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1553
1554         if (mac_mode != tp->mac_mode) {
1555                 tp->mac_mode = mac_mode;
1556                 tw32_f(MAC_MODE, tp->mac_mode);
1557                 udelay(40);
1558         }
1559
1560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1561                 if (phydev->speed == SPEED_10)
1562                         tw32(MAC_MI_STAT,
1563                              MAC_MI_STAT_10MBPS_MODE |
1564                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1565                 else
1566                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1567         }
1568
1569         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1570                 tw32(MAC_TX_LENGTHS,
1571                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1572                       (6 << TX_LENGTHS_IPG_SHIFT) |
1573                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1574         else
1575                 tw32(MAC_TX_LENGTHS,
1576                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1577                       (6 << TX_LENGTHS_IPG_SHIFT) |
1578                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1579
1580         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1581             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1582             phydev->speed != tp->link_config.active_speed ||
1583             phydev->duplex != tp->link_config.active_duplex ||
1584             oldflowctrl != tp->link_config.active_flowctrl)
1585                 linkmesg = 1;
1586
1587         tp->link_config.active_speed = phydev->speed;
1588         tp->link_config.active_duplex = phydev->duplex;
1589
1590         spin_unlock_bh(&tp->lock);
1591
1592         if (linkmesg)
1593                 tg3_link_report(tp);
1594 }
1595
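/* Connect the MAC to its PHY through phylib and trim the PHY's
 * supported feature set to what the MAC interface can handle.
 */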
1596 static int tg3_phy_init(struct tg3 *tp)
1597 {
1598         struct phy_device *phydev;
1599
1600         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1601                 return 0;
1602
1603         /* Bring the PHY back to a known state. */
1604         tg3_bmcr_reset(tp);
1605
1606         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1607
1608         /* Attach the MAC to the PHY. */
1609         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1610                              phydev->dev_flags, phydev->interface);
1611         if (IS_ERR(phydev)) {
1612                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1613                 return PTR_ERR(phydev);
1614         }
1615
1616         /* Mask with MAC supported features. */
1617         switch (phydev->interface) {
1618         case PHY_INTERFACE_MODE_GMII:
1619         case PHY_INTERFACE_MODE_RGMII:
1620                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1621                         phydev->supported &= (PHY_GBIT_FEATURES |
1622                                               SUPPORTED_Pause |
1623                                               SUPPORTED_Asym_Pause);
1624                         break;
1625                 }
1626                 /* fallthru */
1627         case PHY_INTERFACE_MODE_MII:
1628                 phydev->supported &= (PHY_BASIC_FEATURES |
1629                                       SUPPORTED_Pause |
1630                                       SUPPORTED_Asym_Pause);
1631                 break;
1632         default:
1633                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1634                 return -EINVAL;
1635         }
1636
1637         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1638
1639         phydev->advertising = phydev->supported;
1640
1641         return 0;
1642 }
1643
1644 static void tg3_phy_start(struct tg3 *tp)
1645 {
1646         struct phy_device *phydev;
1647
1648         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1649                 return;
1650
1651         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1652
1653         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1654                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1655                 phydev->speed = tp->link_config.orig_speed;
1656                 phydev->duplex = tp->link_config.orig_duplex;
1657                 phydev->autoneg = tp->link_config.orig_autoneg;
1658                 phydev->advertising = tp->link_config.orig_advertising;
1659         }
1660
1661         phy_start(phydev);
1662
1663         phy_start_aneg(phydev);
1664 }
1665
1666 static void tg3_phy_stop(struct tg3 *tp)
1667 {
1668         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1669                 return;
1670
1671         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1672 }
1673
1674 static void tg3_phy_fini(struct tg3 *tp)
1675 {
1676         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1677                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1678                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1679         }
1680 }
1681
1682 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1683 {
1684         u32 phytest;
1685
1686         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1687                 u32 phy;
1688
1689                 tg3_writephy(tp, MII_TG3_FET_TEST,
1690                              phytest | MII_TG3_FET_SHADOW_EN);
1691                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1692                         if (enable)
1693                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1694                         else
1695                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1696                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1697                 }
1698                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1699         }
1700 }
1701
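/* Enable or disable the PHY's auto power-down (APD) feature via the
 * misc shadow registers; FET-style PHYs use a separate shadow bank.
 */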
1702 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1703 {
1704         u32 reg;
1705
1706         if (!tg3_flag(tp, 5705_PLUS) ||
1707             (tg3_flag(tp, 5717_PLUS) &&
1708              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1709                 return;
1710
1711         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1712                 tg3_phy_fet_toggle_apd(tp, enable);
1713                 return;
1714         }
1715
1716         reg = MII_TG3_MISC_SHDW_WREN |
1717               MII_TG3_MISC_SHDW_SCR5_SEL |
1718               MII_TG3_MISC_SHDW_SCR5_LPED |
1719               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1720               MII_TG3_MISC_SHDW_SCR5_SDTL |
1721               MII_TG3_MISC_SHDW_SCR5_C125OE;
1722         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1723                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1724
1725         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1726
1727
1728         reg = MII_TG3_MISC_SHDW_WREN |
1729               MII_TG3_MISC_SHDW_APD_SEL |
1730               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1731         if (enable)
1732                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1733
1734         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1735 }
1736
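/* Toggle forced automatic MDI/MDI-X crossover on copper PHYs; serdes
 * links return early since MDIX does not apply to them.
 */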
1737 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1738 {
1739         u32 phy;
1740
1741         if (!tg3_flag(tp, 5705_PLUS) ||
1742             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1743                 return;
1744
1745         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1746                 u32 ephy;
1747
1748                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1749                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1750
1751                         tg3_writephy(tp, MII_TG3_FET_TEST,
1752                                      ephy | MII_TG3_FET_SHADOW_EN);
1753                         if (!tg3_readphy(tp, reg, &phy)) {
1754                                 if (enable)
1755                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1756                                 else
1757                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1758                                 tg3_writephy(tp, reg, phy);
1759                         }
1760                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1761                 }
1762         } else {
1763                 int ret;
1764
1765                 ret = tg3_phy_auxctl_read(tp,
1766                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1767                 if (!ret) {
1768                         if (enable)
1769                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1770                         else
1771                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1772                         tg3_phy_auxctl_write(tp,
1773                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1774                 }
1775         }
1776 }
1777
1778 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1779 {
1780         int ret;
1781         u32 val;
1782
1783         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1784                 return;
1785
1786         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1787         if (!ret)
1788                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1789                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1790 }
1791
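/* Write the factory calibration values packed into tp->phy_otp out to
 * the individual PHY DSP coefficient registers.
 */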
1792 static void tg3_phy_apply_otp(struct tg3 *tp)
1793 {
1794         u32 otp, phy;
1795
1796         if (!tp->phy_otp)
1797                 return;
1798
1799         otp = tp->phy_otp;
1800
1801         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1802                 return;
1803
1804         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1805         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1806         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1807
1808         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1809               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1810         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1811
1812         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1813         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1814         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1815
1816         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1817         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1818
1819         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1820         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1821
1822         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1823               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1824         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1825
1826         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1827 }
1828
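/* Re-evaluate EEE after a link change: program the LPI exit timer when
 * the link partner negotiated EEE at 100/1000 full duplex, otherwise
 * disable LPI generation.
 */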
1829 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1830 {
1831         u32 val;
1832
1833         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1834                 return;
1835
1836         tp->setlpicnt = 0;
1837
1838         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1839             current_link_up == 1 &&
1840             tp->link_config.active_duplex == DUPLEX_FULL &&
1841             (tp->link_config.active_speed == SPEED_100 ||
1842              tp->link_config.active_speed == SPEED_1000)) {
1843                 u32 eeectl;
1844
1845                 if (tp->link_config.active_speed == SPEED_1000)
1846                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1847                 else
1848                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1849
1850                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1851
1852                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1853                                   TG3_CL45_D7_EEERES_STAT, &val);
1854
1855                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1856                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1857                         tp->setlpicnt = 2;
1858         }
1859
1860         if (!tp->setlpicnt) {
1861                 if (current_link_up == 1 &&
1862                     !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1863                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
1864                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1865                 }
1866
1867                 val = tr32(TG3_CPMU_EEE_MODE);
1868                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1869         }
1870 }
1871
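/* Turn on LPI generation, first applying the DSP TAP26 workaround
 * required for gigabit links on 5717/5719/57765 chips.
 */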
1872 static void tg3_phy_eee_enable(struct tg3 *tp)
1873 {
1874         u32 val;
1875
1876         if (tp->link_config.active_speed == SPEED_1000 &&
1877             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1878              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1879              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1880             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1881                 val = MII_TG3_DSP_TAP26_ALNOKO |
1882                       MII_TG3_DSP_TAP26_RMRXSTO;
1883                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
1884                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1885         }
1886
1887         val = tr32(TG3_CPMU_EEE_MODE);
1888         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1889 }
1890
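/* Poll MII_TG3_DSP_CONTROL until the macro busy bit (0x1000) clears;
 * returns -EBUSY if the poll limit expires first.
 */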
1891 static int tg3_wait_macro_done(struct tg3 *tp)
1892 {
1893         int limit = 100;
1894
1895         while (limit--) {
1896                 u32 tmp32;
1897
1898                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1899                         if ((tmp32 & 0x1000) == 0)
1900                                 break;
1901                 }
1902         }
1903         if (limit < 0)
1904                 return -EBUSY;
1905
1906         return 0;
1907 }
1908
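/* Write a test pattern to each of the four DSP channels and read it
 * back.  Returns -EBUSY on mismatch or handshake timeout; timeouts also
 * request another PHY reset through *resetp.
 */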
1909 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1910 {
1911         static const u32 test_pat[4][6] = {
1912         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1913         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1914         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1915         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1916         };
1917         int chan;
1918
1919         for (chan = 0; chan < 4; chan++) {
1920                 int i;
1921
1922                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1923                              (chan * 0x2000) | 0x0200);
1924                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1925
1926                 for (i = 0; i < 6; i++)
1927                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1928                                      test_pat[chan][i]);
1929
1930                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1931                 if (tg3_wait_macro_done(tp)) {
1932                         *resetp = 1;
1933                         return -EBUSY;
1934                 }
1935
1936                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1937                              (chan * 0x2000) | 0x0200);
1938                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1939                 if (tg3_wait_macro_done(tp)) {
1940                         *resetp = 1;
1941                         return -EBUSY;
1942                 }
1943
1944                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1945                 if (tg3_wait_macro_done(tp)) {
1946                         *resetp = 1;
1947                         return -EBUSY;
1948                 }
1949
1950                 for (i = 0; i < 6; i += 2) {
1951                         u32 low, high;
1952
1953                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1954                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1955                             tg3_wait_macro_done(tp)) {
1956                                 *resetp = 1;
1957                                 return -EBUSY;
1958                         }
1959                         low &= 0x7fff;
1960                         high &= 0x000f;
1961                         if (low != test_pat[chan][i] ||
1962                             high != test_pat[chan][i+1]) {
1963                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1964                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1965                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1966
1967                                 return -EBUSY;
1968                         }
1969                 }
1970         }
1971
1972         return 0;
1973 }
1974
1975 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1976 {
1977         int chan;
1978
1979         for (chan = 0; chan < 4; chan++) {
1980                 int i;
1981
1982                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1983                              (chan * 0x2000) | 0x0200);
1984                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1985                 for (i = 0; i < 6; i++)
1986                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1987                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1988                 if (tg3_wait_macro_done(tp))
1989                         return -EBUSY;
1990         }
1991
1992         return 0;
1993 }
1994
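/* PHY reset workaround for 5703/5704/5705: repeat the BMCR reset until
 * the DSP channel test pattern verifies, then restore the transmitter,
 * interrupt and master-mode settings.
 */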
1995 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1996 {
1997         u32 reg32, phy9_orig;
1998         int retries, do_phy_reset, err;
1999
2000         retries = 10;
2001         do_phy_reset = 1;
2002         do {
2003                 if (do_phy_reset) {
2004                         err = tg3_bmcr_reset(tp);
2005                         if (err)
2006                                 return err;
2007                         do_phy_reset = 0;
2008                 }
2009
2010                 /* Disable transmitter and interrupt.  */
2011                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2012                         continue;
2013
2014                 reg32 |= 0x3000;
2015                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2016
2017                 /* Set full-duplex, 1000 Mbps.  */
2018                 tg3_writephy(tp, MII_BMCR,
2019                              BMCR_FULLDPLX | BMCR_SPEED1000);
2020
2021                 /* Set to master mode.  */
2022                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2023                         continue;
2024
2025                 tg3_writephy(tp, MII_CTRL1000,
2026                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2027
2028                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2029                 if (err)
2030                         return err;
2031
2032                 /* Block the PHY control access.  */
2033                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2034
2035                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2036                 if (!err)
2037                         break;
2038         } while (--retries);
2039
2040         err = tg3_phy_reset_chanpat(tp);
2041         if (err)
2042                 return err;
2043
2044         tg3_phydsp_write(tp, 0x8005, 0x0000);
2045
2046         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2047         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2048
2049         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2050
2051         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2052
2053         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2054                 reg32 &= ~0x3000;
2055                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2056         } else if (!err)
2057                 err = -EBUSY;
2058
2059         return err;
2060 }
2061
2062 /* Reset the Tigon3 PHY unconditionally and reapply the chip-specific
2063  * DSP and auxiliary-control workarounds afterwards.
2064  */
2065 static int tg3_phy_reset(struct tg3 *tp)
2066 {
2067         u32 val, cpmuctrl;
2068         int err;
2069
2070         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2071                 val = tr32(GRC_MISC_CFG);
2072                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2073                 udelay(40);
2074         }
2075         err  = tg3_readphy(tp, MII_BMSR, &val);
2076         err |= tg3_readphy(tp, MII_BMSR, &val);
2077         if (err != 0)
2078                 return -EBUSY;
2079
2080         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2081                 netif_carrier_off(tp->dev);
2082                 tg3_link_report(tp);
2083         }
2084
2085         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2086             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2087             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2088                 err = tg3_phy_reset_5703_4_5(tp);
2089                 if (err)
2090                         return err;
2091                 goto out;
2092         }
2093
2094         cpmuctrl = 0;
2095         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2096             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2097                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2098                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2099                         tw32(TG3_CPMU_CTRL,
2100                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2101         }
2102
2103         err = tg3_bmcr_reset(tp);
2104         if (err)
2105                 return err;
2106
2107         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2108                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2109                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2110
2111                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2112         }
2113
2114         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2115             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2116                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2117                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2118                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2119                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2120                         udelay(40);
2121                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2122                 }
2123         }
2124
2125         if (tg3_flag(tp, 5717_PLUS) &&
2126             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2127                 return 0;
2128
2129         tg3_phy_apply_otp(tp);
2130
2131         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2132                 tg3_phy_toggle_apd(tp, true);
2133         else
2134                 tg3_phy_toggle_apd(tp, false);
2135
2136 out:
2137         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2138             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2139                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2140                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2141                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2142         }
2143
2144         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2145                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2146                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2147         }
2148
2149         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2150                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2151                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2152                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2153                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2154                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2155                 }
2156         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2157                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2158                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2159                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2160                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2161                                 tg3_writephy(tp, MII_TG3_TEST1,
2162                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2163                         } else
2164                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2165
2166                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2167                 }
2168         }
2169
2170         /* Set the extended packet length bit (bit 14) on all chips
2171          * that support jumbo frames. */
2172         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2173                 /* Cannot do read-modify-write on 5401 */
2174                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2175         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2176                 /* Set bit 14 with read-modify-write to preserve other bits */
2177                 err = tg3_phy_auxctl_read(tp,
2178                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2179                 if (!err)
2180                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2181                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2182         }
2183
2184         /* Set PHY register 0x10 bit 0 (high FIFO elasticity) to support
2185          * jumbo frame transmission.
2186          */
2187         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2188                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2189                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2190                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2191         }
2192
2193         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2194                 /* adjust output voltage */
2195                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2196         }
2197
2198         tg3_phy_toggle_automdix(tp, 1);
2199         tg3_phy_set_wirespeed(tp);
2200         return 0;
2201 }
2202
2203 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2204 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2205 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2206                                           TG3_GPIO_MSG_NEED_VAUX)
2207 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2208         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2209          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2210          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2211          (TG3_GPIO_MSG_DRVR_PRES << 12))
2212
2213 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2214         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2215          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2216          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2217          (TG3_GPIO_MSG_NEED_VAUX << 12))
2218
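/* Update this PCI function's driver-presence/VAUX-request bits in the
 * shared GPIO message word (APE or CPMU resident) and return the
 * combined status of all functions.
 */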
2219 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2220 {
2221         u32 status, shift;
2222
2223         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2224             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2225                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2226         else
2227                 status = tr32(TG3_CPMU_DRV_STATUS);
2228
2229         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2230         status &= ~(TG3_GPIO_MSG_MASK << shift);
2231         status |= (newstat << shift);
2232
2233         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2234             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2235                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2236         else
2237                 tw32(TG3_CPMU_DRV_STATUS, status);
2238
2239         return status >> TG3_APE_GPIO_MSG_SHIFT;
2240 }
2241
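/* Switch the NIC's power source back to VMAIN, using the APE GPIO lock
 * to serialize against the other functions on 5717-class chips.
 */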
2242 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2243 {
2244         if (!tg3_flag(tp, IS_NIC))
2245                 return 0;
2246
2247         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2248             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2249             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2250                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2251                         return -EIO;
2252
2253                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2254
2255                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2256                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2257
2258                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2259         } else {
2260                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2261                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2262         }
2263
2264         return 0;
2265 }
2266
2267 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2268 {
2269         u32 grc_local_ctrl;
2270
2271         if (!tg3_flag(tp, IS_NIC) ||
2272             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2273             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2274                 return;
2275
2276         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2277
2278         tw32_wait_f(GRC_LOCAL_CTRL,
2279                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2280                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2281
2282         tw32_wait_f(GRC_LOCAL_CTRL,
2283                     grc_local_ctrl,
2284                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2285
2286         tw32_wait_f(GRC_LOCAL_CTRL,
2287                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2288                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2289 }
2290
2291 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2292 {
2293         if (!tg3_flag(tp, IS_NIC))
2294                 return;
2295
2296         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2297             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2298                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2299                             (GRC_LCLCTRL_GPIO_OE0 |
2300                              GRC_LCLCTRL_GPIO_OE1 |
2301                              GRC_LCLCTRL_GPIO_OE2 |
2302                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2303                              GRC_LCLCTRL_GPIO_OUTPUT1),
2304                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2305         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2306                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2307                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2308                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2309                                      GRC_LCLCTRL_GPIO_OE1 |
2310                                      GRC_LCLCTRL_GPIO_OE2 |
2311                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2312                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2313                                      tp->grc_local_ctrl;
2314                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2315                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2316
2317                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2318                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2319                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2320
2321                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2322                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2323                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2324         } else {
2325                 u32 no_gpio2;
2326                 u32 grc_local_ctrl = 0;
2327
2328                 /* Workaround to avoid drawing excess current. */
2329                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2330                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2331                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2332                                     grc_local_ctrl,
2333                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2334                 }
2335
2336                 /* On 5753 and variants, GPIO2 cannot be used. */
2337                 no_gpio2 = tp->nic_sram_data_cfg &
2338                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2339
2340                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2341                                   GRC_LCLCTRL_GPIO_OE1 |
2342                                   GRC_LCLCTRL_GPIO_OE2 |
2343                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2344                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2345                 if (no_gpio2) {
2346                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2347                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2348                 }
2349                 tw32_wait_f(GRC_LOCAL_CTRL,
2350                             tp->grc_local_ctrl | grc_local_ctrl,
2351                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2352
2353                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2354
2355                 tw32_wait_f(GRC_LOCAL_CTRL,
2356                             tp->grc_local_ctrl | grc_local_ctrl,
2357                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2358
2359                 if (!no_gpio2) {
2360                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2361                         tw32_wait_f(GRC_LOCAL_CTRL,
2362                                     tp->grc_local_ctrl | grc_local_ctrl,
2363                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2364                 }
2365         }
2366 }
2367
2368 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2369 {
2370         u32 msg = 0;
2371
2372         /* Serialize power state transitions */
2373         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2374                 return;
2375
2376         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2377                 msg = TG3_GPIO_MSG_NEED_VAUX;
2378
2379         msg = tg3_set_function_status(tp, msg);
2380
2381         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2382                 goto done;
2383
2384         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2385                 tg3_pwrsrc_switch_to_vaux(tp);
2386         else
2387                 tg3_pwrsrc_die_with_vmain(tp);
2388
2389 done:
2390         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2391 }
2392
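/* Choose between VMAIN and VAUX power sourcing, honoring the WoL/ASF
 * needs of both this function and, on dual-port devices, its peer.
 */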
2393 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2394 {
2395         bool need_vaux = false;
2396
2397         /* The GPIOs do something completely different on 57765. */
2398         if (!tg3_flag(tp, IS_NIC) ||
2399             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2400                 return;
2401
2402         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2403             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2404             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2405                 tg3_frob_aux_power_5717(tp, include_wol ?
2406                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2407                 return;
2408         }
2409
2410         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2411                 struct net_device *dev_peer;
2412
2413                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2414
2415                 /* remove_one() may have been run on the peer. */
2416                 if (dev_peer) {
2417                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2418
2419                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2420                                 return;
2421
2422                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2423                             tg3_flag(tp_peer, ENABLE_ASF))
2424                                 need_vaux = true;
2425                 }
2426         }
2427
2428         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2429             tg3_flag(tp, ENABLE_ASF))
2430                 need_vaux = true;
2431
2432         if (need_vaux)
2433                 tg3_pwrsrc_switch_to_vaux(tp);
2434         else
2435                 tg3_pwrsrc_die_with_vmain(tp);
2436 }
2437
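/* Return 1 when MAC_MODE_LINK_POLARITY must be set on 5700-family
 * chips for the given link speed, based on LED mode and PHY type.
 */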
2438 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2439 {
2440         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2441                 return 1;
2442         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2443                 if (speed != SPEED_10)
2444                         return 1;
2445         } else if (speed == SPEED_10)
2446                 return 1;
2447
2448         return 0;
2449 }
2450
2451 static int tg3_setup_phy(struct tg3 *, int);
2452
2453 #define RESET_KIND_SHUTDOWN     0
2454 #define RESET_KIND_INIT         1
2455 #define RESET_KIND_SUSPEND      2
2456
2457 static void tg3_write_sig_post_reset(struct tg3 *, int);
2458 static int tg3_halt_cpu(struct tg3 *, u32);
2459
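/* Quiesce the PHY for a low-power transition.  The final BMCR
 * power-down is skipped on chips whose errata forbid it.
 */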
2460 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2461 {
2462         u32 val;
2463
2464         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2465                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2466                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2467                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2468
2469                         sg_dig_ctrl |=
2470                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2471                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2472                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2473                 }
2474                 return;
2475         }
2476
2477         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2478                 tg3_bmcr_reset(tp);
2479                 val = tr32(GRC_MISC_CFG);
2480                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2481                 udelay(40);
2482                 return;
2483         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2484                 u32 phytest;
2485                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2486                         u32 phy;
2487
2488                         tg3_writephy(tp, MII_ADVERTISE, 0);
2489                         tg3_writephy(tp, MII_BMCR,
2490                                      BMCR_ANENABLE | BMCR_ANRESTART);
2491
2492                         tg3_writephy(tp, MII_TG3_FET_TEST,
2493                                      phytest | MII_TG3_FET_SHADOW_EN);
2494                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2495                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2496                                 tg3_writephy(tp,
2497                                              MII_TG3_FET_SHDW_AUXMODE4,
2498                                              phy);
2499                         }
2500                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2501                 }
2502                 return;
2503         } else if (do_low_power) {
2504                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2505                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2506
2507                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2508                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2509                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2510                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2511         }
2512
2513         /* The PHY must not be powered down on some chips because
2514          * of hardware errata.
2515          */
2516         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2517             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2518             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2519              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2520                 return;
2521
2522         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2523             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2524                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2525                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2526                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2527                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2528         }
2529
2530         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2531 }
2532
2533 /* tp->lock is held. */
2534 static int tg3_nvram_lock(struct tg3 *tp)
2535 {
2536         if (tg3_flag(tp, NVRAM)) {
2537                 int i;
2538
2539                 if (tp->nvram_lock_cnt == 0) {
2540                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2541                         for (i = 0; i < 8000; i++) {
2542                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2543                                         break;
2544                                 udelay(20);
2545                         }
2546                         if (i == 8000) {
2547                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2548                                 return -ENODEV;
2549                         }
2550                 }
2551                 tp->nvram_lock_cnt++;
2552         }
2553         return 0;
2554 }
2555
2556 /* tp->lock is held. */
2557 static void tg3_nvram_unlock(struct tg3 *tp)
2558 {
2559         if (tg3_flag(tp, NVRAM)) {
2560                 if (tp->nvram_lock_cnt > 0)
2561                         tp->nvram_lock_cnt--;
2562                 if (tp->nvram_lock_cnt == 0)
2563                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2564         }
2565 }
2566
2567 /* tp->lock is held. */
2568 static void tg3_enable_nvram_access(struct tg3 *tp)
2569 {
2570         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2571                 u32 nvaccess = tr32(NVRAM_ACCESS);
2572
2573                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2574         }
2575 }
2576
2577 /* tp->lock is held. */
2578 static void tg3_disable_nvram_access(struct tg3 *tp)
2579 {
2580         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2581                 u32 nvaccess = tr32(NVRAM_ACCESS);
2582
2583                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2584         }
2585 }
2586
2587 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2588                                         u32 offset, u32 *val)
2589 {
2590         u32 tmp;
2591         int i;
2592
2593         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2594                 return -EINVAL;
2595
2596         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2597                                         EEPROM_ADDR_DEVID_MASK |
2598                                         EEPROM_ADDR_READ);
2599         tw32(GRC_EEPROM_ADDR,
2600              tmp |
2601              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2602              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2603               EEPROM_ADDR_ADDR_MASK) |
2604              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2605
2606         for (i = 0; i < 1000; i++) {
2607                 tmp = tr32(GRC_EEPROM_ADDR);
2608
2609                 if (tmp & EEPROM_ADDR_COMPLETE)
2610                         break;
2611                 msleep(1);
2612         }
2613         if (!(tmp & EEPROM_ADDR_COMPLETE))
2614                 return -EBUSY;
2615
2616         tmp = tr32(GRC_EEPROM_DATA);
2617
2618         /*
2619          * The data will always be opposite the native endian
2620          * format.  Perform a blind byteswap to compensate.
2621          */
2622         *val = swab32(tmp);
2623
2624         return 0;
2625 }
2626
2627 #define NVRAM_CMD_TIMEOUT 10000
2628
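/* Issue an NVRAM command and poll for NVRAM_CMD_DONE, waiting up to
 * NVRAM_CMD_TIMEOUT iterations of 10 usecs each; -EBUSY on timeout.
 */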
2629 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2630 {
2631         int i;
2632
2633         tw32(NVRAM_CMD, nvram_cmd);
2634         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2635                 udelay(10);
2636                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2637                         udelay(10);
2638                         break;
2639                 }
2640         }
2641
2642         if (i == NVRAM_CMD_TIMEOUT)
2643                 return -EBUSY;
2644
2645         return 0;
2646 }
2647
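/* Convert a linear NVRAM offset into the page-based physical address
 * used by Atmel AT45DB0X1B parts; tg3_nvram_logical_addr() below is
 * the inverse mapping.
 */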
2648 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2649 {
2650         if (tg3_flag(tp, NVRAM) &&
2651             tg3_flag(tp, NVRAM_BUFFERED) &&
2652             tg3_flag(tp, FLASH) &&
2653             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2654             (tp->nvram_jedecnum == JEDEC_ATMEL))
2655
2656                 addr = ((addr / tp->nvram_pagesize) <<
2657                         ATMEL_AT45DB0X1B_PAGE_POS) +
2658                        (addr % tp->nvram_pagesize);
2659
2660         return addr;
2661 }
2662
2663 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2664 {
2665         if (tg3_flag(tp, NVRAM) &&
2666             tg3_flag(tp, NVRAM_BUFFERED) &&
2667             tg3_flag(tp, FLASH) &&
2668             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2669             (tp->nvram_jedecnum == JEDEC_ATMEL))
2670
2671                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2672                         tp->nvram_pagesize) +
2673                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2674
2675         return addr;
2676 }
2677
2678 /* NOTE: Data read in from NVRAM is byteswapped according to
2679  * the byteswapping settings for all other register accesses.
2680  * tg3 devices are BE devices, so on a BE machine, the data
2681  * returned will be exactly as it is seen in NVRAM.  On a LE
2682  * machine, the 32-bit value will be byteswapped.
2683  */
2684 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2685 {
2686         int ret;
2687
2688         if (!tg3_flag(tp, NVRAM))
2689                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2690
2691         offset = tg3_nvram_phys_addr(tp, offset);
2692
2693         if (offset > NVRAM_ADDR_MSK)
2694                 return -EINVAL;
2695
2696         ret = tg3_nvram_lock(tp);
2697         if (ret)
2698                 return ret;
2699
2700         tg3_enable_nvram_access(tp);
2701
2702         tw32(NVRAM_ADDR, offset);
2703         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2704                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2705
2706         if (ret == 0)
2707                 *val = tr32(NVRAM_RDDATA);
2708
2709         tg3_disable_nvram_access(tp);
2710
2711         tg3_nvram_unlock(tp);
2712
2713         return ret;
2714 }
2715
2716 /* Ensures NVRAM data is in bytestream format. */
2717 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2718 {
2719         u32 v;
2720         int res = tg3_nvram_read(tp, offset, &v);
2721         if (!res)
2722                 *val = cpu_to_be32(v);
2723         return res;
2724 }
2725
2726 /* tp->lock is held. */
2727 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2728 {
2729         u32 addr_high, addr_low;
2730         int i;
2731
2732         addr_high = ((tp->dev->dev_addr[0] << 8) |
2733                      tp->dev->dev_addr[1]);
2734         addr_low = ((tp->dev->dev_addr[2] << 24) |
2735                     (tp->dev->dev_addr[3] << 16) |
2736                     (tp->dev->dev_addr[4] <<  8) |
2737                     (tp->dev->dev_addr[5] <<  0));
2738         for (i = 0; i < 4; i++) {
2739                 if (i == 1 && skip_mac_1)
2740                         continue;
2741                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2742                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2743         }
2744
2745         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2746             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2747                 for (i = 0; i < 12; i++) {
2748                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2749                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2750                 }
2751         }
2752
2753         addr_high = (tp->dev->dev_addr[0] +
2754                      tp->dev->dev_addr[1] +
2755                      tp->dev->dev_addr[2] +
2756                      tp->dev->dev_addr[3] +
2757                      tp->dev->dev_addr[4] +
2758                      tp->dev->dev_addr[5]) &
2759                 TX_BACKOFF_SEED_MASK;
2760         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2761 }
2762
2763 static void tg3_enable_register_access(struct tg3 *tp)
2764 {
2765         /*
2766          * Make sure register accesses (indirect or otherwise) will function
2767          * correctly.
2768          */
2769         pci_write_config_dword(tp->pdev,
2770                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2771 }
2772
2773 static int tg3_power_up(struct tg3 *tp)
2774 {
2775         int err;
2776
2777         tg3_enable_register_access(tp);
2778
2779         err = pci_set_power_state(tp->pdev, PCI_D0);
2780         if (!err) {
2781                 /* Switch out of Vaux if it is a NIC */
2782                 tg3_pwrsrc_switch_to_vmain(tp);
2783         } else {
2784                 netdev_err(tp->dev, "Transition to D0 failed\n");
2785         }
2786
2787         return err;
2788 }
2789
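/* Prepare for a power-down: restore CLKREQ, drop the PHY to its
 * low-power/WoL configuration, arm the WoL mailbox and magic-packet
 * MAC mode, and gate clocks where the chip allows it.
 */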
2790 static int tg3_power_down_prepare(struct tg3 *tp)
2791 {
2792         u32 misc_host_ctrl;
2793         bool device_should_wake, do_low_power;
2794
2795         tg3_enable_register_access(tp);
2796
2797         /* Restore the CLKREQ setting. */
2798         if (tg3_flag(tp, CLKREQ_BUG)) {
2799                 u16 lnkctl;
2800
2801                 pci_read_config_word(tp->pdev,
2802                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2803                                      &lnkctl);
2804                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2805                 pci_write_config_word(tp->pdev,
2806                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2807                                       lnkctl);
2808         }
2809
2810         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2811         tw32(TG3PCI_MISC_HOST_CTRL,
2812              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2813
2814         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2815                              tg3_flag(tp, WOL_ENABLE);
2816
2817         if (tg3_flag(tp, USE_PHYLIB)) {
2818                 do_low_power = false;
2819                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2820                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2821                         struct phy_device *phydev;
2822                         u32 phyid, advertising;
2823
2824                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2825
2826                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2827
2828                         tp->link_config.orig_speed = phydev->speed;
2829                         tp->link_config.orig_duplex = phydev->duplex;
2830                         tp->link_config.orig_autoneg = phydev->autoneg;
2831                         tp->link_config.orig_advertising = phydev->advertising;
2832
2833                         advertising = ADVERTISED_TP |
2834                                       ADVERTISED_Pause |
2835                                       ADVERTISED_Autoneg |
2836                                       ADVERTISED_10baseT_Half;
2837
2838                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2839                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2840                                         advertising |=
2841                                                 ADVERTISED_100baseT_Half |
2842                                                 ADVERTISED_100baseT_Full |
2843                                                 ADVERTISED_10baseT_Full;
2844                                 else
2845                                         advertising |= ADVERTISED_10baseT_Full;
2846                         }
2847
2848                         phydev->advertising = advertising;
2849
2850                         phy_start_aneg(phydev);
2851
2852                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2853                         if (phyid != PHY_ID_BCMAC131) {
2854                                 phyid &= PHY_BCM_OUI_MASK;
2855                                 if (phyid == PHY_BCM_OUI_1 ||
2856                                     phyid == PHY_BCM_OUI_2 ||
2857                                     phyid == PHY_BCM_OUI_3)
2858                                         do_low_power = true;
2859                         }
2860                 }
2861         } else {
2862                 do_low_power = true;
2863
2864                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2865                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2866                         tp->link_config.orig_speed = tp->link_config.speed;
2867                         tp->link_config.orig_duplex = tp->link_config.duplex;
2868                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2869                 }
2870
2871                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2872                         tp->link_config.speed = SPEED_10;
2873                         tp->link_config.duplex = DUPLEX_HALF;
2874                         tp->link_config.autoneg = AUTONEG_ENABLE;
2875                         tg3_setup_phy(tp, 0);
2876                 }
2877         }
2878
2879         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2880                 u32 val;
2881
2882                 val = tr32(GRC_VCPU_EXT_CTRL);
2883                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2884         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2885                 int i;
2886                 u32 val;
2887
2888                 for (i = 0; i < 200; i++) {
2889                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2890                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2891                                 break;
2892                         msleep(1);
2893                 }
2894         }
2895         if (tg3_flag(tp, WOL_CAP))
2896                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2897                                                      WOL_DRV_STATE_SHUTDOWN |
2898                                                      WOL_DRV_WOL |
2899                                                      WOL_SET_MAGIC_PKT);
2900
2901         if (device_should_wake) {
2902                 u32 mac_mode;
2903
2904                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2905                         if (do_low_power &&
2906                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2907                                 tg3_phy_auxctl_write(tp,
2908                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2909                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2910                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2911                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2912                                 udelay(40);
2913                         }
2914
2915                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2916                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2917                         else
2918                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2919
2920                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2921                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2922                             ASIC_REV_5700) {
2923                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2924                                              SPEED_100 : SPEED_10;
2925                                 if (tg3_5700_link_polarity(tp, speed))
2926                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2927                                 else
2928                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2929                         }
2930                 } else {
2931                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2932                 }
2933
2934                 if (!tg3_flag(tp, 5750_PLUS))
2935                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2936
2937                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2938                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2939                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2940                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2941
2942                 if (tg3_flag(tp, ENABLE_APE))
2943                         mac_mode |= MAC_MODE_APE_TX_EN |
2944                                     MAC_MODE_APE_RX_EN |
2945                                     MAC_MODE_TDE_ENABLE;
2946
2947                 tw32_f(MAC_MODE, mac_mode);
2948                 udelay(100);
2949
2950                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2951                 udelay(10);
2952         }
2953
2954         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2955             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2956              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2957                 u32 base_val;
2958
2959                 base_val = tp->pci_clock_ctrl;
2960                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2961                              CLOCK_CTRL_TXCLK_DISABLE);
2962
2963                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2964                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2965         } else if (tg3_flag(tp, 5780_CLASS) ||
2966                    tg3_flag(tp, CPMU_PRESENT) ||
2967                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2968                 /* do nothing */
2969         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2970                 u32 newbits1, newbits2;
2971
2972                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2973                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2974                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2975                                     CLOCK_CTRL_TXCLK_DISABLE |
2976                                     CLOCK_CTRL_ALTCLK);
2977                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2978                 } else if (tg3_flag(tp, 5705_PLUS)) {
2979                         newbits1 = CLOCK_CTRL_625_CORE;
2980                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2981                 } else {
2982                         newbits1 = CLOCK_CTRL_ALTCLK;
2983                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2984                 }
2985
2986                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2987                             40);
2988
2989                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2990                             40);
2991
2992                 if (!tg3_flag(tp, 5705_PLUS)) {
2993                         u32 newbits3;
2994
2995                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2996                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2997                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2998                                             CLOCK_CTRL_TXCLK_DISABLE |
2999                                             CLOCK_CTRL_44MHZ_CORE);
3000                         } else {
3001                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3002                         }
3003
3004                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3005                                     tp->pci_clock_ctrl | newbits3, 40);
3006                 }
3007         }
3008
3009         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3010                 tg3_power_down_phy(tp, do_low_power);
3011
3012         tg3_frob_aux_power(tp, true);
3013
3014         /* Workaround for unstable PLL clock */
3015         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3016             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3017                 u32 val = tr32(0x7d00);
3018
3019                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3020                 tw32(0x7d00, val);
3021                 if (!tg3_flag(tp, ENABLE_ASF)) {
3022                         int err;
3023
3024                         err = tg3_nvram_lock(tp);
3025                         tg3_halt_cpu(tp, RX_CPU_BASE);
3026                         if (!err)
3027                                 tg3_nvram_unlock(tp);
3028                 }
3029         }
3030
3031         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3032
3033         return 0;
3034 }
3035
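/* Final power-down: run the shutdown preparation above, then arm PCI
 * wakeup (if WOL is enabled) and drop the device into D3hot.
 */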
3036 static void tg3_power_down(struct tg3 *tp)
3037 {
3038         tg3_power_down_prepare(tp);
3039
3040         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3041         pci_set_power_state(tp->pdev, PCI_D3hot);
3042 }
3043
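/* Decode the speed/duplex field of the MII_TG3_AUX_STAT register.
 * FET-style PHYs encode link speed differently, hence the fallback
 * in the default case.
 */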
3044 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3045 {
3046         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3047         case MII_TG3_AUX_STAT_10HALF:
3048                 *speed = SPEED_10;
3049                 *duplex = DUPLEX_HALF;
3050                 break;
3051
3052         case MII_TG3_AUX_STAT_10FULL:
3053                 *speed = SPEED_10;
3054                 *duplex = DUPLEX_FULL;
3055                 break;
3056
3057         case MII_TG3_AUX_STAT_100HALF:
3058                 *speed = SPEED_100;
3059                 *duplex = DUPLEX_HALF;
3060                 break;
3061
3062         case MII_TG3_AUX_STAT_100FULL:
3063                 *speed = SPEED_100;
3064                 *duplex = DUPLEX_FULL;
3065                 break;
3066
3067         case MII_TG3_AUX_STAT_1000HALF:
3068                 *speed = SPEED_1000;
3069                 *duplex = DUPLEX_HALF;
3070                 break;
3071
3072         case MII_TG3_AUX_STAT_1000FULL:
3073                 *speed = SPEED_1000;
3074                 *duplex = DUPLEX_FULL;
3075                 break;
3076
3077         default:
3078                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3079                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3080                                  SPEED_10;
3081                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3082                                   DUPLEX_HALF;
3083                         break;
3084                 }
3085                 *speed = SPEED_INVALID;
3086                 *duplex = DUPLEX_INVALID;
3087                 break;
3088         }
3089 }
3090
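/* Program the PHY advertisement registers from an ethtool-style
 * ADVERTISED_* mask and flow control request: 10/100 modes and pause
 * bits go into MII_ADVERTISE, gigabit modes into MII_CTRL1000, and
 * EEE abilities are advertised via clause 45 access when supported.
 */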
3091 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3092 {
3093         int err = 0;
3094         u32 val, new_adv;
3095
3096         new_adv = ADVERTISE_CSMA;
3097         if (advertise & ADVERTISED_10baseT_Half)
3098                 new_adv |= ADVERTISE_10HALF;
3099         if (advertise & ADVERTISED_10baseT_Full)
3100                 new_adv |= ADVERTISE_10FULL;
3101         if (advertise & ADVERTISED_100baseT_Half)
3102                 new_adv |= ADVERTISE_100HALF;
3103         if (advertise & ADVERTISED_100baseT_Full)
3104                 new_adv |= ADVERTISE_100FULL;
3105
3106         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3107
3108         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3109         if (err)
3110                 goto done;
3111
3112         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3113                 goto done;
3114
3115         new_adv = 0;
3116         if (advertise & ADVERTISED_1000baseT_Half)
3117                 new_adv |= ADVERTISE_1000HALF;
3118         if (advertise & ADVERTISED_1000baseT_Full)
3119                 new_adv |= ADVERTISE_1000FULL;
3120
3121         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3122             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3123                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3124
3125         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3126         if (err)
3127                 goto done;
3128
3129         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3130                 goto done;
3131
3132         tw32(TG3_CPMU_EEE_MODE,
3133              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3134
3135         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3136         if (!err) {
3137                 int err2;
3138
3139                 val = 0;
3140                 /* Advertise 100BASE-TX EEE ability */
3141                 if (advertise & ADVERTISED_100baseT_Full)
3142                         val |= MDIO_AN_EEE_ADV_100TX;
3143                 /* Advertise 1000BASE-T EEE ability */
3144                 if (advertise & ADVERTISED_1000baseT_Full)
3145                         val |= MDIO_AN_EEE_ADV_1000T;
3146                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3147                 if (err)
3148                         val = 0;
3149
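                /* val now holds the EEE modes actually advertised
                 * (zero if the write failed); the TAP26 bits below
                 * are set only in that case, otherwise cleared.
                 */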
3150                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3151                 case ASIC_REV_5717:
3152                 case ASIC_REV_57765:
3153                 case ASIC_REV_5719:
3154                         /* If we advertised any EEE modes above... */
3155                         if (val)
3156                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3157                                       MII_TG3_DSP_TAP26_RMRXSTO |
3158                                       MII_TG3_DSP_TAP26_OPCSINPT;
3159                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3160                         /* Fall through */
3161                 case ASIC_REV_5720:
3162                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3163                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3164                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3165                 }
3166
3167                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3168                 if (!err)
3169                         err = err2;
3170         }
3171
3172 done:
3173         return err;
3174 }
3175
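/* Begin copper link negotiation: choose an advertisement mask based
 * on low-power/WOL state, then either force the configured
 * speed/duplex through BMCR or (re)start autonegotiation.
 */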
3176 static void tg3_phy_copper_begin(struct tg3 *tp)
3177 {
3178         u32 new_adv;
3179         int i;
3180
3181         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3182                 new_adv = ADVERTISED_10baseT_Half |
3183                           ADVERTISED_10baseT_Full;
3184                 if (tg3_flag(tp, WOL_SPEED_100MB))
3185                         new_adv |= ADVERTISED_100baseT_Half |
3186                                    ADVERTISED_100baseT_Full;
3187
3188                 tg3_phy_autoneg_cfg(tp, new_adv,
3189                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3190         } else if (tp->link_config.speed == SPEED_INVALID) {
3191                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3192                         tp->link_config.advertising &=
3193                                 ~(ADVERTISED_1000baseT_Half |
3194                                   ADVERTISED_1000baseT_Full);
3195
3196                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3197                                     tp->link_config.flowctrl);
3198         } else {
3199                 /* Asking for a specific link mode. */
3200                 if (tp->link_config.speed == SPEED_1000) {
3201                         if (tp->link_config.duplex == DUPLEX_FULL)
3202                                 new_adv = ADVERTISED_1000baseT_Full;
3203                         else
3204                                 new_adv = ADVERTISED_1000baseT_Half;
3205                 } else if (tp->link_config.speed == SPEED_100) {
3206                         if (tp->link_config.duplex == DUPLEX_FULL)
3207                                 new_adv = ADVERTISED_100baseT_Full;
3208                         else
3209                                 new_adv = ADVERTISED_100baseT_Half;
3210                 } else {
3211                         if (tp->link_config.duplex == DUPLEX_FULL)
3212                                 new_adv = ADVERTISED_10baseT_Full;
3213                         else
3214                                 new_adv = ADVERTISED_10baseT_Half;
3215                 }
3216
3217                 tg3_phy_autoneg_cfg(tp, new_adv,
3218                                     tp->link_config.flowctrl);
3219         }
3220
3221         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3222             tp->link_config.speed != SPEED_INVALID) {
3223                 u32 bmcr, orig_bmcr;
3224
3225                 tp->link_config.active_speed = tp->link_config.speed;
3226                 tp->link_config.active_duplex = tp->link_config.duplex;
3227
3228                 bmcr = 0;
3229                 switch (tp->link_config.speed) {
3230                 default:
3231                 case SPEED_10:
3232                         break;
3233
3234                 case SPEED_100:
3235                         bmcr |= BMCR_SPEED100;
3236                         break;
3237
3238                 case SPEED_1000:
3239                         bmcr |= BMCR_SPEED1000;
3240                         break;
3241                 }
3242
3243                 if (tp->link_config.duplex == DUPLEX_FULL)
3244                         bmcr |= BMCR_FULLDPLX;
3245
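                /* To force a new speed/duplex, drop the link first by
                 * placing the PHY in loopback, wait for link-down to
                 * latch, then program the real BMCR value.
                 */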
3246                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3247                     (bmcr != orig_bmcr)) {
3248                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3249                         for (i = 0; i < 1500; i++) {
3250                                 u32 tmp;
3251
3252                                 udelay(10);
3253                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3254                                     tg3_readphy(tp, MII_BMSR, &tmp))
3255                                         continue;
3256                                 if (!(tmp & BMSR_LSTATUS)) {
3257                                         udelay(40);
3258                                         break;
3259                                 }
3260                         }
3261                         tg3_writephy(tp, MII_BMCR, bmcr);
3262                         udelay(40);
3263                 }
3264         } else {
3265                 tg3_writephy(tp, MII_BMCR,
3266                              BMCR_ANENABLE | BMCR_ANRESTART);
3267         }
3268 }
3269
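/* One-time DSP setup for the BCM5401 PHY.  The register/value pairs
 * are undocumented vendor magic.
 */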
3270 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3271 {
3272         int err;
3273
3274         /* Turn off tap power management. */
3275         /* Set Extended packet length bit */
3276         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3277
3278         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3279         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3280         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3281         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3282         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3283
3284         udelay(40);
3285
3286         return err;
3287 }
3288
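/* Return 1 if the PHY advertisement registers already cover every
 * mode in @mask, 0 otherwise (including on register read failure).
 */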
3289 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3290 {
3291         u32 adv_reg, all_mask = 0;
3292
3293         if (mask & ADVERTISED_10baseT_Half)
3294                 all_mask |= ADVERTISE_10HALF;
3295         if (mask & ADVERTISED_10baseT_Full)
3296                 all_mask |= ADVERTISE_10FULL;
3297         if (mask & ADVERTISED_100baseT_Half)
3298                 all_mask |= ADVERTISE_100HALF;
3299         if (mask & ADVERTISED_100baseT_Full)
3300                 all_mask |= ADVERTISE_100FULL;
3301
3302         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3303                 return 0;
3304
3305         if ((adv_reg & all_mask) != all_mask)
3306                 return 0;
3307         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3308                 u32 tg3_ctrl;
3309
3310                 all_mask = 0;
3311                 if (mask & ADVERTISED_1000baseT_Half)
3312                         all_mask |= ADVERTISE_1000HALF;
3313                 if (mask & ADVERTISED_1000baseT_Full)
3314                         all_mask |= ADVERTISE_1000FULL;
3315
3316                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3317                         return 0;
3318
3319                 if ((tg3_ctrl & all_mask) != all_mask)
3320                         return 0;
3321         }
3322         return 1;
3323 }
3324
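/* Check the advertised pause bits against the requested flow control.
 * On a full-duplex link a mismatch returns 0 to force renegotiation;
 * on half duplex the advertisement is quietly rewritten so the next
 * renegotiation picks it up.
 */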
3325 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3326 {
3327         u32 curadv, reqadv;
3328
3329         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3330                 return 1;
3331
3332         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3333         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3334
3335         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3336                 if (curadv != reqadv)
3337                         return 0;
3338
3339                 if (tg3_flag(tp, PAUSE_AUTONEG))
3340                         tg3_readphy(tp, MII_LPA, rmtadv);
3341         } else {
3342                 /* Reprogram the advertisement register, even if it
3343                  * does not affect the current link.  If the link
3344                  * gets renegotiated in the future, we can save an
3345                  * additional renegotiation cycle by advertising
3346                  * it correctly in the first place.
3347                  */
3348                 if (curadv != reqadv) {
3349                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3350                                      ADVERTISE_PAUSE_ASYM);
3351                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3352                 }
3353         }
3354
3355         return 1;
3356 }
3357
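/* Main copper link state machine: quiesce MAC events, apply PHY
 * resets and workarounds, poll BMSR for link, derive speed/duplex
 * from AUX_STAT, validate flow control, then reprogram MAC_MODE and
 * report carrier changes.
 */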
3358 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3359 {
3360         int current_link_up;
3361         u32 bmsr, val;
3362         u32 lcl_adv, rmt_adv;
3363         u16 current_speed;
3364         u8 current_duplex;
3365         int i, err;
3366
3367         tw32(MAC_EVENT, 0);
3368
3369         tw32_f(MAC_STATUS,
3370              (MAC_STATUS_SYNC_CHANGED |
3371               MAC_STATUS_CFG_CHANGED |
3372               MAC_STATUS_MI_COMPLETION |
3373               MAC_STATUS_LNKSTATE_CHANGED));
3374         udelay(40);
3375
3376         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3377                 tw32_f(MAC_MI_MODE,
3378                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3379                 udelay(80);
3380         }
3381
3382         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3383
3384         /* Some third-party PHYs need to be reset on link going
3385          * down.
3386          */
3387         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3388              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3389              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3390             netif_carrier_ok(tp->dev)) {
3391                 tg3_readphy(tp, MII_BMSR, &bmsr);
3392                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3393                     !(bmsr & BMSR_LSTATUS))
3394                         force_reset = 1;
3395         }
3396         if (force_reset)
3397                 tg3_phy_reset(tp);
3398
3399         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3400                 tg3_readphy(tp, MII_BMSR, &bmsr);
3401                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3402                     !tg3_flag(tp, INIT_COMPLETE))
3403                         bmsr = 0;
3404
3405                 if (!(bmsr & BMSR_LSTATUS)) {
3406                         err = tg3_init_5401phy_dsp(tp);
3407                         if (err)
3408                                 return err;
3409
3410                         tg3_readphy(tp, MII_BMSR, &bmsr);
3411                         for (i = 0; i < 1000; i++) {
3412                                 udelay(10);
3413                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3414                                     (bmsr & BMSR_LSTATUS)) {
3415                                         udelay(40);
3416                                         break;
3417                                 }
3418                         }
3419
3420                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3421                             TG3_PHY_REV_BCM5401_B0 &&
3422                             !(bmsr & BMSR_LSTATUS) &&
3423                             tp->link_config.active_speed == SPEED_1000) {
3424                                 err = tg3_phy_reset(tp);
3425                                 if (!err)
3426                                         err = tg3_init_5401phy_dsp(tp);
3427                                 if (err)
3428                                         return err;
3429                         }
3430                 }
3431         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3432                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3433                 /* 5701 {A0,B0} CRC bug workaround */
3434                 tg3_writephy(tp, 0x15, 0x0a75);
3435                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3436                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3437                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3438         }
3439
3440         /* Clear pending interrupts... */
3441         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3442         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3443
3444         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3445                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3446         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3447                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3448
3449         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3450             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3451                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3452                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3453                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3454                 else
3455                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3456         }
3457
3458         current_link_up = 0;
3459         current_speed = SPEED_INVALID;
3460         current_duplex = DUPLEX_INVALID;
3461
3462         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3463                 err = tg3_phy_auxctl_read(tp,
3464                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3465                                           &val);
3466                 if (!err && !(val & (1 << 10))) {
3467                         tg3_phy_auxctl_write(tp,
3468                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3469                                              val | (1 << 10));
3470                         goto relink;
3471                 }
3472         }
3473
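        /* BMSR latches link-down events; read it twice so the second
         * read reflects the current link state.
         */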
3474         bmsr = 0;
3475         for (i = 0; i < 100; i++) {
3476                 tg3_readphy(tp, MII_BMSR, &bmsr);
3477                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3478                     (bmsr & BMSR_LSTATUS))
3479                         break;
3480                 udelay(40);
3481         }
3482
3483         if (bmsr & BMSR_LSTATUS) {
3484                 u32 aux_stat, bmcr;
3485
3486                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3487                 for (i = 0; i < 2000; i++) {
3488                         udelay(10);
3489                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3490                             aux_stat)
3491                                 break;
3492                 }
3493
3494                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3495                                              &current_speed,
3496                                              &current_duplex);
3497
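                /* BMCR can read back as 0 or 0x7fff while the PHY is
                 * not ready (e.g. coming out of reset); retry until a
                 * plausible value is seen.
                 */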
3498                 bmcr = 0;
3499                 for (i = 0; i < 200; i++) {
3500                         tg3_readphy(tp, MII_BMCR, &bmcr);
3501                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3502                                 continue;
3503                         if (bmcr && bmcr != 0x7fff)
3504                                 break;
3505                         udelay(10);
3506                 }
3507
3508                 lcl_adv = 0;
3509                 rmt_adv = 0;
3510
3511                 tp->link_config.active_speed = current_speed;
3512                 tp->link_config.active_duplex = current_duplex;
3513
3514                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3515                         if ((bmcr & BMCR_ANENABLE) &&
3516                             tg3_copper_is_advertising_all(tp,
3517                                                 tp->link_config.advertising)) {
3518                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3519                                                                   &rmt_adv))
3520                                         current_link_up = 1;
3521                         }
3522                 } else {
3523                         if (!(bmcr & BMCR_ANENABLE) &&
3524                             tp->link_config.speed == current_speed &&
3525                             tp->link_config.duplex == current_duplex &&
3526                             tp->link_config.flowctrl ==
3527                             tp->link_config.active_flowctrl) {
3528                                 current_link_up = 1;
3529                         }
3530                 }
3531
3532                 if (current_link_up == 1 &&
3533                     tp->link_config.active_duplex == DUPLEX_FULL)
3534                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3535         }
3536
3537 relink:
3538         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3539                 tg3_phy_copper_begin(tp);
3540
3541                 tg3_readphy(tp, MII_BMSR, &bmsr);
3542                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3543                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3544                         current_link_up = 1;
3545         }
3546
3547         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3548         if (current_link_up == 1) {
3549                 if (tp->link_config.active_speed == SPEED_100 ||
3550                     tp->link_config.active_speed == SPEED_10)
3551                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3552                 else
3553                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3554         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3555                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3556         else
3557                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3558
3559         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3560         if (tp->link_config.active_duplex == DUPLEX_HALF)
3561                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3562
3563         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3564                 if (current_link_up == 1 &&
3565                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3566                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3567                 else
3568                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3569         }
3570
3571         /* Without this setting the Netgear GA302T PHY does not
3572          * send or receive packets; the root cause is unknown.
3573          */
3574         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3575             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3576                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3577                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3578                 udelay(80);
3579         }
3580
3581         tw32_f(MAC_MODE, tp->mac_mode);
3582         udelay(40);
3583
3584         tg3_phy_eee_adjust(tp, current_link_up);
3585
3586         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3587                 /* Polled via timer. */
3588                 tw32_f(MAC_EVENT, 0);
3589         } else {
3590                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3591         }
3592         udelay(40);
3593
3594         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3595             current_link_up == 1 &&
3596             tp->link_config.active_speed == SPEED_1000 &&
3597             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3598                 udelay(120);
3599                 tw32_f(MAC_STATUS,
3600                      (MAC_STATUS_SYNC_CHANGED |
3601                       MAC_STATUS_CFG_CHANGED));
3602                 udelay(40);
3603                 tg3_write_mem(tp,
3604                               NIC_SRAM_FIRMWARE_MBOX,
3605                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3606         }
3607
3608         /* Prevent send BD corruption: disable CLKREQ at 10/100 link speeds. */
3609         if (tg3_flag(tp, CLKREQ_BUG)) {
3610                 u16 oldlnkctl, newlnkctl;
3611
3612                 pci_read_config_word(tp->pdev,
3613                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3614                                      &oldlnkctl);
3615                 if (tp->link_config.active_speed == SPEED_100 ||
3616                     tp->link_config.active_speed == SPEED_10)
3617                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3618                 else
3619                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3620                 if (newlnkctl != oldlnkctl)
3621                         pci_write_config_word(tp->pdev,
3622                                               pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3623                                               newlnkctl);
3624         }
3625
3626         if (current_link_up != netif_carrier_ok(tp->dev)) {
3627                 if (current_link_up)
3628                         netif_carrier_on(tp->dev);
3629                 else
3630                         netif_carrier_off(tp->dev);
3631                 tg3_link_report(tp);
3632         }
3633
3634         return 0;
3635 }
3636
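/* Software state machine for IEEE 802.3z clause 37 (1000BASE-X)
 * autonegotiation, used when the hardware SG-DIG autoneg engine is
 * not available.  The MR_* flag names follow the management register
 * variables of the clause 37 arbitration state diagram.
 */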
3637 struct tg3_fiber_aneginfo {
3638         int state;
3639 #define ANEG_STATE_UNKNOWN              0
3640 #define ANEG_STATE_AN_ENABLE            1
3641 #define ANEG_STATE_RESTART_INIT         2
3642 #define ANEG_STATE_RESTART              3
3643 #define ANEG_STATE_DISABLE_LINK_OK      4
3644 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3645 #define ANEG_STATE_ABILITY_DETECT       6
3646 #define ANEG_STATE_ACK_DETECT_INIT      7
3647 #define ANEG_STATE_ACK_DETECT           8
3648 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3649 #define ANEG_STATE_COMPLETE_ACK         10
3650 #define ANEG_STATE_IDLE_DETECT_INIT     11
3651 #define ANEG_STATE_IDLE_DETECT          12
3652 #define ANEG_STATE_LINK_OK              13
3653 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3654 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3655
3656         u32 flags;
3657 #define MR_AN_ENABLE            0x00000001
3658 #define MR_RESTART_AN           0x00000002
3659 #define MR_AN_COMPLETE          0x00000004
3660 #define MR_PAGE_RX              0x00000008
3661 #define MR_NP_LOADED            0x00000010
3662 #define MR_TOGGLE_TX            0x00000020
3663 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3664 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3665 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3666 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3667 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3668 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3669 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3670 #define MR_TOGGLE_RX            0x00002000
3671 #define MR_NP_RX                0x00004000
3672
3673 #define MR_LINK_OK              0x80000000
3674
3675         unsigned long link_time, cur_time;
3676
3677         u32 ability_match_cfg;
3678         int ability_match_count;
3679
3680         char ability_match, idle_match, ack_match;
3681
3682         u32 txconfig, rxconfig;
3683 #define ANEG_CFG_NP             0x00000080
3684 #define ANEG_CFG_ACK            0x00000040
3685 #define ANEG_CFG_RF2            0x00000020
3686 #define ANEG_CFG_RF1            0x00000010
3687 #define ANEG_CFG_PS2            0x00000001
3688 #define ANEG_CFG_PS1            0x00008000
3689 #define ANEG_CFG_HD             0x00004000
3690 #define ANEG_CFG_FD             0x00002000
3691 #define ANEG_CFG_INVAL          0x00001f06
3692
3693 };
3694 #define ANEG_OK         0
3695 #define ANEG_DONE       1
3696 #define ANEG_TIMER_ENAB 2
3697 #define ANEG_FAILED     -1
3698
3699 #define ANEG_STATE_SETTLE_TIME  10000
3700
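/* Advance the fiber autoneg state machine by one tick.  Returns
 * ANEG_DONE or ANEG_FAILED when negotiation has finished, otherwise
 * ANEG_OK or ANEG_TIMER_ENAB while the caller should keep ticking.
 */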
3701 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3702                                    struct tg3_fiber_aneginfo *ap)
3703 {
3704         u16 flowctrl;
3705         unsigned long delta;
3706         u32 rx_cfg_reg;
3707         int ret;
3708
3709         if (ap->state == ANEG_STATE_UNKNOWN) {
3710                 ap->rxconfig = 0;
3711                 ap->link_time = 0;
3712                 ap->cur_time = 0;
3713                 ap->ability_match_cfg = 0;
3714                 ap->ability_match_count = 0;
3715                 ap->ability_match = 0;
3716                 ap->idle_match = 0;
3717                 ap->ack_match = 0;
3718         }
3719         ap->cur_time++;
3720
3721         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3722                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3723
3724                 if (rx_cfg_reg != ap->ability_match_cfg) {
3725                         ap->ability_match_cfg = rx_cfg_reg;
3726                         ap->ability_match = 0;
3727                         ap->ability_match_count = 0;
3728                 } else {
3729                         if (++ap->ability_match_count > 1) {
3730                                 ap->ability_match = 1;
3731                                 ap->ability_match_cfg = rx_cfg_reg;
3732                         }
3733                 }
3734                 if (rx_cfg_reg & ANEG_CFG_ACK)
3735                         ap->ack_match = 1;
3736                 else
3737                         ap->ack_match = 0;
3738
3739                 ap->idle_match = 0;
3740         } else {
3741                 ap->idle_match = 1;
3742                 ap->ability_match_cfg = 0;
3743                 ap->ability_match_count = 0;
3744                 ap->ability_match = 0;
3745                 ap->ack_match = 0;
3746
3747                 rx_cfg_reg = 0;
3748         }
3749
3750         ap->rxconfig = rx_cfg_reg;
3751         ret = ANEG_OK;
3752
3753         switch (ap->state) {
3754         case ANEG_STATE_UNKNOWN:
3755                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3756                         ap->state = ANEG_STATE_AN_ENABLE;
3757
3758                 /* fallthru */
3759         case ANEG_STATE_AN_ENABLE:
3760                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3761                 if (ap->flags & MR_AN_ENABLE) {
3762                         ap->link_time = 0;
3763                         ap->cur_time = 0;
3764                         ap->ability_match_cfg = 0;
3765                         ap->ability_match_count = 0;
3766                         ap->ability_match = 0;
3767                         ap->idle_match = 0;
3768                         ap->ack_match = 0;
3769
3770                         ap->state = ANEG_STATE_RESTART_INIT;
3771                 } else {
3772                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3773                 }
3774                 break;
3775
3776         case ANEG_STATE_RESTART_INIT:
3777                 ap->link_time = ap->cur_time;
3778                 ap->flags &= ~(MR_NP_LOADED);
3779                 ap->txconfig = 0;
3780                 tw32(MAC_TX_AUTO_NEG, 0);
3781                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3782                 tw32_f(MAC_MODE, tp->mac_mode);
3783                 udelay(40);
3784
3785                 ret = ANEG_TIMER_ENAB;
3786                 ap->state = ANEG_STATE_RESTART;
3787
3788                 /* fallthru */
3789         case ANEG_STATE_RESTART:
3790                 delta = ap->cur_time - ap->link_time;
3791                 if (delta > ANEG_STATE_SETTLE_TIME)
3792                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3793                 else
3794                         ret = ANEG_TIMER_ENAB;
3795                 break;
3796
3797         case ANEG_STATE_DISABLE_LINK_OK:
3798                 ret = ANEG_DONE;
3799                 break;
3800
3801         case ANEG_STATE_ABILITY_DETECT_INIT:
3802                 ap->flags &= ~(MR_TOGGLE_TX);
3803                 ap->txconfig = ANEG_CFG_FD;
3804                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3805                 if (flowctrl & ADVERTISE_1000XPAUSE)
3806                         ap->txconfig |= ANEG_CFG_PS1;
3807                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3808                         ap->txconfig |= ANEG_CFG_PS2;
3809                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3810                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3811                 tw32_f(MAC_MODE, tp->mac_mode);
3812                 udelay(40);
3813
3814                 ap->state = ANEG_STATE_ABILITY_DETECT;
3815                 break;
3816
3817         case ANEG_STATE_ABILITY_DETECT:
3818                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3819                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3820                 break;
3821
3822         case ANEG_STATE_ACK_DETECT_INIT:
3823                 ap->txconfig |= ANEG_CFG_ACK;
3824                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3825                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3826                 tw32_f(MAC_MODE, tp->mac_mode);
3827                 udelay(40);
3828
3829                 ap->state = ANEG_STATE_ACK_DETECT;
3830
3831                 /* fallthru */
3832         case ANEG_STATE_ACK_DETECT:
3833                 if (ap->ack_match != 0) {
3834                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3835                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3836                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3837                         } else {
3838                                 ap->state = ANEG_STATE_AN_ENABLE;
3839                         }
3840                 } else if (ap->ability_match != 0 &&
3841                            ap->rxconfig == 0) {
3842                         ap->state = ANEG_STATE_AN_ENABLE;
3843                 }
3844                 break;
3845
3846         case ANEG_STATE_COMPLETE_ACK_INIT:
3847                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3848                         ret = ANEG_FAILED;
3849                         break;
3850                 }
3851                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3852                                MR_LP_ADV_HALF_DUPLEX |
3853                                MR_LP_ADV_SYM_PAUSE |
3854                                MR_LP_ADV_ASYM_PAUSE |
3855                                MR_LP_ADV_REMOTE_FAULT1 |
3856                                MR_LP_ADV_REMOTE_FAULT2 |
3857                                MR_LP_ADV_NEXT_PAGE |
3858                                MR_TOGGLE_RX |
3859                                MR_NP_RX);
3860                 if (ap->rxconfig & ANEG_CFG_FD)
3861                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3862                 if (ap->rxconfig & ANEG_CFG_HD)
3863                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3864                 if (ap->rxconfig & ANEG_CFG_PS1)
3865                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3866                 if (ap->rxconfig & ANEG_CFG_PS2)
3867                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3868                 if (ap->rxconfig & ANEG_CFG_RF1)
3869                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3870                 if (ap->rxconfig & ANEG_CFG_RF2)
3871                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3872                 if (ap->rxconfig & ANEG_CFG_NP)
3873                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3874
3875                 ap->link_time = ap->cur_time;
3876
3877                 ap->flags ^= (MR_TOGGLE_TX);
3878                 if (ap->rxconfig & 0x0008)
3879                         ap->flags |= MR_TOGGLE_RX;
3880                 if (ap->rxconfig & ANEG_CFG_NP)
3881                         ap->flags |= MR_NP_RX;
3882                 ap->flags |= MR_PAGE_RX;
3883
3884                 ap->state = ANEG_STATE_COMPLETE_ACK;
3885                 ret = ANEG_TIMER_ENAB;
3886                 break;
3887
3888         case ANEG_STATE_COMPLETE_ACK:
3889                 if (ap->ability_match != 0 &&
3890                     ap->rxconfig == 0) {
3891                         ap->state = ANEG_STATE_AN_ENABLE;
3892                         break;
3893                 }
3894                 delta = ap->cur_time - ap->link_time;
3895                 if (delta > ANEG_STATE_SETTLE_TIME) {
3896                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3897                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3898                         } else {
3899                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3900                                     !(ap->flags & MR_NP_RX)) {
3901                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3902                                 } else {
3903                                         ret = ANEG_FAILED;
3904                                 }
3905                         }
3906                 }
3907                 break;
3908
3909         case ANEG_STATE_IDLE_DETECT_INIT:
3910                 ap->link_time = ap->cur_time;
3911                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3912                 tw32_f(MAC_MODE, tp->mac_mode);
3913                 udelay(40);
3914
3915                 ap->state = ANEG_STATE_IDLE_DETECT;
3916                 ret = ANEG_TIMER_ENAB;
3917                 break;
3918
3919         case ANEG_STATE_IDLE_DETECT:
3920                 if (ap->ability_match != 0 &&
3921                     ap->rxconfig == 0) {
3922                         ap->state = ANEG_STATE_AN_ENABLE;
3923                         break;
3924                 }
3925                 delta = ap->cur_time - ap->link_time;
3926                 if (delta > ANEG_STATE_SETTLE_TIME) {
3927                         /* XXX another gem from the Broadcom driver :( */
3928                         ap->state = ANEG_STATE_LINK_OK;
3929                 }
3930                 break;
3931
3932         case ANEG_STATE_LINK_OK:
3933                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3934                 ret = ANEG_DONE;
3935                 break;
3936
3937         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3938                 /* ??? unimplemented */
3939                 break;
3940
3941         case ANEG_STATE_NEXT_PAGE_WAIT:
3942                 /* ??? unimplemented */
3943                 break;
3944
3945         default:
3946                 ret = ANEG_FAILED;
3947                 break;
3948         }
3949
3950         return ret;
3951 }
3952
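/* Run the software autoneg state machine to completion, bounded at
 * roughly 195ms of 1us ticks.  Returns 1 when a usable link was
 * negotiated and reports the tx/rx config words to the caller.
 */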
3953 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3954 {
3955         int res = 0;
3956         struct tg3_fiber_aneginfo aninfo;
3957         int status = ANEG_FAILED;
3958         unsigned int tick;
3959         u32 tmp;
3960
3961         tw32_f(MAC_TX_AUTO_NEG, 0);
3962
3963         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3964         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3965         udelay(40);
3966
3967         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3968         udelay(40);
3969
3970         memset(&aninfo, 0, sizeof(aninfo));
3971         aninfo.flags |= MR_AN_ENABLE;
3972         aninfo.state = ANEG_STATE_UNKNOWN;
3973         aninfo.cur_time = 0;
3974         tick = 0;
3975         while (++tick < 195000) {
3976                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3977                 if (status == ANEG_DONE || status == ANEG_FAILED)
3978                         break;
3979
3980                 udelay(1);
3981         }
3982
3983         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3984         tw32_f(MAC_MODE, tp->mac_mode);
3985         udelay(40);
3986
3987         *txflags = aninfo.txconfig;
3988         *rxflags = aninfo.flags;
3989
3990         if (status == ANEG_DONE &&
3991             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3992                              MR_LP_ADV_FULL_DUPLEX)))
3993                 res = 1;
3994
3995         return res;
3996 }
3997
3998 static void tg3_init_bcm8002(struct tg3 *tp)
3999 {
4000         u32 mac_status = tr32(MAC_STATUS);
4001         int i;
4002
4003         /* Reset when initializing for the first time, or when we have a link. */
4004         if (tg3_flag(tp, INIT_COMPLETE) &&
4005             !(mac_status & MAC_STATUS_PCS_SYNCED))
4006                 return;
4007
4008         /* Set PLL lock range. */
4009         tg3_writephy(tp, 0x16, 0x8007);
4010
4011         /* SW reset */
4012         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4013
4014         /* Wait for reset to complete. */
4015         /* XXX schedule_timeout() ... */
4016         for (i = 0; i < 500; i++)
4017                 udelay(10);
4018
4019         /* Config mode; select PMA/Ch 1 regs. */
4020         tg3_writephy(tp, 0x10, 0x8411);
4021
4022         /* Enable auto-lock and comdet, select txclk for tx. */
4023         tg3_writephy(tp, 0x11, 0x0a10);
4024
4025         tg3_writephy(tp, 0x18, 0x00a0);
4026         tg3_writephy(tp, 0x16, 0x41ff);
4027
4028         /* Assert and deassert POR. */
4029         tg3_writephy(tp, 0x13, 0x0400);
4030         udelay(40);
4031         tg3_writephy(tp, 0x13, 0x0000);
4032
4033         tg3_writephy(tp, 0x11, 0x0a50);
4034         udelay(40);
4035         tg3_writephy(tp, 0x11, 0x0a10);
4036
4037         /* Wait for signal to stabilize */
4038         /* XXX schedule_timeout() ... */
4039         for (i = 0; i < 15000; i++)
4040                 udelay(10);
4041
4042         /* Deselect the channel register so we can read the PHYID
4043          * later.
4044          */
4045         tg3_writephy(tp, 0x10, 0x8011);
4046 }
4047
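/* Drive fiber link setup through the hardware SG-DIG autoneg engine
 * (with serdes config workarounds for most chip revisions).  Returns
 * nonzero if the link came up.
 */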
4048 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4049 {
4050         u16 flowctrl;
4051         u32 sg_dig_ctrl, sg_dig_status;
4052         u32 serdes_cfg, expected_sg_dig_ctrl;
4053         int workaround, port_a;
4054         int current_link_up;
4055
4056         serdes_cfg = 0;
4057         expected_sg_dig_ctrl = 0;
4058         workaround = 0;
4059         port_a = 1;
4060         current_link_up = 0;
4061
4062         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4063             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4064                 workaround = 1;
4065                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4066                         port_a = 0;
4067
4068                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4069                 /* preserve bits 20-23 for voltage regulator */
4070                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4071         }
4072
4073         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4074
4075         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4076                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4077                         if (workaround) {
4078                                 u32 val = serdes_cfg;
4079
4080                                 if (port_a)
4081                                         val |= 0xc010000;
4082                                 else
4083                                         val |= 0x4010000;
4084                                 tw32_f(MAC_SERDES_CFG, val);
4085                         }
4086
4087                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4088                 }
4089                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4090                         tg3_setup_flow_control(tp, 0, 0);
4091                         current_link_up = 1;
4092                 }
4093                 goto out;
4094         }
4095
4096         /* Want auto-negotiation. */
4097         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4098
4099         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4100         if (flowctrl & ADVERTISE_1000XPAUSE)
4101                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4102         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4103                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4104
4105         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4106                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4107                     tp->serdes_counter &&
4108                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4109                                     MAC_STATUS_RCVD_CFG)) ==
4110                      MAC_STATUS_PCS_SYNCED)) {
4111                         tp->serdes_counter--;
4112                         current_link_up = 1;
4113                         goto out;
4114                 }
4115 restart_autoneg:
4116                 if (workaround)
4117                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4118                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4119                 udelay(5);
4120                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4121
4122                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4123                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4124         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4125                                  MAC_STATUS_SIGNAL_DET)) {
4126                 sg_dig_status = tr32(SG_DIG_STATUS);
4127                 mac_status = tr32(MAC_STATUS);
4128
4129                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4130                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4131                         u32 local_adv = 0, remote_adv = 0;
4132
4133                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4134                                 local_adv |= ADVERTISE_1000XPAUSE;
4135                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4136                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4137
4138                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4139                                 remote_adv |= LPA_1000XPAUSE;
4140                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4141                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4142
4143                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4144                         current_link_up = 1;
4145                         tp->serdes_counter = 0;
4146                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4147                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4148                         if (tp->serdes_counter)
4149                                 tp->serdes_counter--;
4150                         else {
4151                                 if (workaround) {
4152                                         u32 val = serdes_cfg;
4153
4154                                         if (port_a)
4155                                                 val |= 0xc010000;
4156                                         else
4157                                                 val |= 0x4010000;
4158
4159                                         tw32_f(MAC_SERDES_CFG, val);
4160                                 }
4161
4162                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4163                                 udelay(40);
4164
4165                                 /* Parallel detection: the link is up only
4166                                  * if we have PCS_SYNC and are not
4167                                  * receiving config code words. */
4168                                 mac_status = tr32(MAC_STATUS);
4169                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4170                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4171                                         tg3_setup_flow_control(tp, 0, 0);
4172                                         current_link_up = 1;
4173                                         tp->phy_flags |=
4174                                                 TG3_PHYFLG_PARALLEL_DETECT;
4175                                         tp->serdes_counter =
4176                                                 SERDES_PARALLEL_DET_TIMEOUT;
4177                                 } else
4178                                         goto restart_autoneg;
4179                         }
4180                 }
4181         } else {
4182                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4183                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4184         }
4185
4186 out:
4187         return current_link_up;
4188 }
4189
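/* Fiber link setup handled entirely in software: run the clause 37
 * state machine when autonegotiation is enabled, otherwise force a
 * 1000FD link.  Returns nonzero if the link came up.
 */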
4190 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4191 {
4192         int current_link_up = 0;
4193
4194         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4195                 goto out;
4196
4197         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4198                 u32 txflags, rxflags;
4199                 int i;
4200
4201                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4202                         u32 local_adv = 0, remote_adv = 0;
4203
4204                         if (txflags & ANEG_CFG_PS1)
4205                                 local_adv |= ADVERTISE_1000XPAUSE;
4206                         if (txflags & ANEG_CFG_PS2)
4207                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4208
4209                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4210                                 remote_adv |= LPA_1000XPAUSE;
4211                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4212                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4213
4214                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4215
4216                         current_link_up = 1;
4217                 }
4218                 for (i = 0; i < 30; i++) {
4219                         udelay(20);
4220                         tw32_f(MAC_STATUS,
4221                                (MAC_STATUS_SYNC_CHANGED |
4222                                 MAC_STATUS_CFG_CHANGED));
4223                         udelay(40);
4224                         if ((tr32(MAC_STATUS) &
4225                              (MAC_STATUS_SYNC_CHANGED |
4226                               MAC_STATUS_CFG_CHANGED)) == 0)
4227                                 break;
4228                 }
4229
4230                 mac_status = tr32(MAC_STATUS);
4231                 if (current_link_up == 0 &&
4232                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4233                     !(mac_status & MAC_STATUS_RCVD_CFG))
4234                         current_link_up = 1;
4235         } else {
4236                 tg3_setup_flow_control(tp, 0, 0);
4237
4238                 /* Forcing 1000FD link up. */
4239                 current_link_up = 1;
4240
4241                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4242                 udelay(40);
4243
4244                 tw32_f(MAC_MODE, tp->mac_mode);
4245                 udelay(40);
4246         }
4247
4248 out:
4249         return current_link_up;
4250 }
4251
4252 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4253 {
4254         u32 orig_pause_cfg;
4255         u16 orig_active_speed;
4256         u8 orig_active_duplex;
4257         u32 mac_status;
4258         int current_link_up;
4259         int i;
4260
4261         orig_pause_cfg = tp->link_config.active_flowctrl;
4262         orig_active_speed = tp->link_config.active_speed;
4263         orig_active_duplex = tp->link_config.active_duplex;
4264
4265         if (!tg3_flag(tp, HW_AUTONEG) &&
4266             netif_carrier_ok(tp->dev) &&
4267             tg3_flag(tp, INIT_COMPLETE)) {
4268                 mac_status = tr32(MAC_STATUS);
4269                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4270                                MAC_STATUS_SIGNAL_DET |
4271                                MAC_STATUS_CFG_CHANGED |
4272                                MAC_STATUS_RCVD_CFG);
4273                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4274                                    MAC_STATUS_SIGNAL_DET)) {
4275                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4276                                             MAC_STATUS_CFG_CHANGED));
4277                         return 0;
4278                 }
4279         }
4280
4281         tw32_f(MAC_TX_AUTO_NEG, 0);
4282
4283         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4284         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4285         tw32_f(MAC_MODE, tp->mac_mode);
4286         udelay(40);
4287
4288         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4289                 tg3_init_bcm8002(tp);
4290
4291         /* Enable link change event even when polling the serdes. */
4292         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4293         udelay(40);
4294
4295         current_link_up = 0;
4296         mac_status = tr32(MAC_STATUS);
4297
4298         if (tg3_flag(tp, HW_AUTONEG))
4299                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4300         else
4301                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4302
4303         tp->napi[0].hw_status->status =
4304                 (SD_STATUS_UPDATED |
4305                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4306
4307         for (i = 0; i < 100; i++) {
4308                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4309                                     MAC_STATUS_CFG_CHANGED));
4310                 udelay(5);
4311                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4312                                          MAC_STATUS_CFG_CHANGED |
4313                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4314                         break;
4315         }
4316
4317         mac_status = tr32(MAC_STATUS);
4318         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4319                 current_link_up = 0;
4320                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4321                     tp->serdes_counter == 0) {
4322                         tw32_f(MAC_MODE, (tp->mac_mode |
4323                                           MAC_MODE_SEND_CONFIGS));
4324                         udelay(1);
4325                         tw32_f(MAC_MODE, tp->mac_mode);
4326                 }
4327         }
4328
4329         if (current_link_up == 1) {
4330                 tp->link_config.active_speed = SPEED_1000;
4331                 tp->link_config.active_duplex = DUPLEX_FULL;
4332                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4333                                     LED_CTRL_LNKLED_OVERRIDE |
4334                                     LED_CTRL_1000MBPS_ON));
4335         } else {
4336                 tp->link_config.active_speed = SPEED_INVALID;
4337                 tp->link_config.active_duplex = DUPLEX_INVALID;
4338                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4339                                     LED_CTRL_LNKLED_OVERRIDE |
4340                                     LED_CTRL_TRAFFIC_OVERRIDE));
4341         }
4342
4343         if (current_link_up != netif_carrier_ok(tp->dev)) {
4344                 if (current_link_up)
4345                         netif_carrier_on(tp->dev);
4346                 else
4347                         netif_carrier_off(tp->dev);
4348                 tg3_link_report(tp);
4349         } else {
4350                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4351                 if (orig_pause_cfg != now_pause_cfg ||
4352                     orig_active_speed != tp->link_config.active_speed ||
4353                     orig_active_duplex != tp->link_config.active_duplex)
4354                         tg3_link_report(tp);
4355         }
4356
4357         return 0;
4358 }
4359
4360 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4361 {
4362         int current_link_up, err = 0;
4363         u32 bmsr, bmcr;
4364         u16 current_speed;
4365         u8 current_duplex;
4366         u32 local_adv, remote_adv;
4367
4368         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4369         tw32_f(MAC_MODE, tp->mac_mode);
4370         udelay(40);
4371
4372         tw32(MAC_EVENT, 0);
4373
4374         tw32_f(MAC_STATUS,
4375              (MAC_STATUS_SYNC_CHANGED |
4376               MAC_STATUS_CFG_CHANGED |
4377               MAC_STATUS_MI_COMPLETION |
4378               MAC_STATUS_LNKSTATE_CHANGED));
4379         udelay(40);
4380
4381         if (force_reset)
4382                 tg3_phy_reset(tp);
4383
4384         current_link_up = 0;
4385         current_speed = SPEED_INVALID;
4386         current_duplex = DUPLEX_INVALID;
4387
4388         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4389         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4390         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4391                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4392                         bmsr |= BMSR_LSTATUS;
4393                 else
4394                         bmsr &= ~BMSR_LSTATUS;
4395         }
4396
4397         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4398
4399         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4400             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4401                 /* do nothing, just check for link up at the end */
4402         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4403                 u32 adv, new_adv;
4404
4405                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4406                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4407                                   ADVERTISE_1000XPAUSE |
4408                                   ADVERTISE_1000XPSE_ASYM |
4409                                   ADVERTISE_SLCT);
4410
4411                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4412
4413                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4414                         new_adv |= ADVERTISE_1000XHALF;
4415                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4416                         new_adv |= ADVERTISE_1000XFULL;
4417
4418                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4419                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4420                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4421                         tg3_writephy(tp, MII_BMCR, bmcr);
4422
4423                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4424                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4425                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4426
4427                         return err;
4428                 }
4429         } else {
4430                 u32 new_bmcr;
4431
4432                 bmcr &= ~BMCR_SPEED1000;
4433                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4434
4435                 if (tp->link_config.duplex == DUPLEX_FULL)
4436                         new_bmcr |= BMCR_FULLDPLX;
4437
4438                 if (new_bmcr != bmcr) {
4439                         /* BMCR_SPEED1000 is a reserved bit that needs
4440                          * to be set on write.
4441                          */
4442                         new_bmcr |= BMCR_SPEED1000;
4443
4444                         /* Force a linkdown */
4445                         if (netif_carrier_ok(tp->dev)) {
4446                                 u32 adv;
4447
4448                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4449                                 adv &= ~(ADVERTISE_1000XFULL |
4450                                          ADVERTISE_1000XHALF |
4451                                          ADVERTISE_SLCT);
4452                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4453                                 tg3_writephy(tp, MII_BMCR, bmcr |
4454                                                            BMCR_ANRESTART |
4455                                                            BMCR_ANENABLE);
4456                                 udelay(10);
4457                                 netif_carrier_off(tp->dev);
4458                         }
4459                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4460                         bmcr = new_bmcr;
4461                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4462                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4463                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4464                             ASIC_REV_5714) {
4465                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4466                                         bmsr |= BMSR_LSTATUS;
4467                                 else
4468                                         bmsr &= ~BMSR_LSTATUS;
4469                         }
4470                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4471                 }
4472         }
4473
4474         if (bmsr & BMSR_LSTATUS) {
4475                 current_speed = SPEED_1000;
4476                 current_link_up = 1;
4477                 if (bmcr & BMCR_FULLDPLX)
4478                         current_duplex = DUPLEX_FULL;
4479                 else
4480                         current_duplex = DUPLEX_HALF;
4481
4482                 local_adv = 0;
4483                 remote_adv = 0;
4484
4485                 if (bmcr & BMCR_ANENABLE) {
4486                         u32 common;
4487
4488                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4489                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4490                         common = local_adv & remote_adv;
4491                         if (common & (ADVERTISE_1000XHALF |
4492                                       ADVERTISE_1000XFULL)) {
4493                                 if (common & ADVERTISE_1000XFULL)
4494                                         current_duplex = DUPLEX_FULL;
4495                                 else
4496                                         current_duplex = DUPLEX_HALF;
4497                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4498                                 /* Link is up via parallel detect */
4499                         } else {
4500                                 current_link_up = 0;
4501                         }
4502                 }
4503         }
4504
4505         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4506                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4507
4508         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4509         if (tp->link_config.active_duplex == DUPLEX_HALF)
4510                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4511
4512         tw32_f(MAC_MODE, tp->mac_mode);
4513         udelay(40);
4514
4515         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4516
4517         tp->link_config.active_speed = current_speed;
4518         tp->link_config.active_duplex = current_duplex;
4519
4520         if (current_link_up != netif_carrier_ok(tp->dev)) {
4521                 if (current_link_up)
4522                         netif_carrier_on(tp->dev);
4523                 else {
4524                         netif_carrier_off(tp->dev);
4525                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4526                 }
4527                 tg3_link_report(tp);
4528         }
4529         return err;
4530 }
4531
4532 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4533 {
4534         if (tp->serdes_counter) {
4535                 /* Give autoneg time to complete. */
4536                 tp->serdes_counter--;
4537                 return;
4538         }
4539
4540         if (!netif_carrier_ok(tp->dev) &&
4541             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4542                 u32 bmcr;
4543
4544                 tg3_readphy(tp, MII_BMCR, &bmcr);
4545                 if (bmcr & BMCR_ANENABLE) {
4546                         u32 phy1, phy2;
4547
4548                         /* Select shadow register 0x1f */
4549                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4550                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4551
4552                         /* Select expansion interrupt status register */
4553                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4554                                          MII_TG3_DSP_EXP1_INT_STAT);
4555                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4556                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4557
4558                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4559                                 /* We have signal detect and not receiving
4560                                  * config code words, link is up by parallel
4561                                  * detection.
4562                                  */
4563
4564                                 bmcr &= ~BMCR_ANENABLE;
4565                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4566                                 tg3_writephy(tp, MII_BMCR, bmcr);
4567                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4568                         }
4569                 }
4570         } else if (netif_carrier_ok(tp->dev) &&
4571                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4572                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4573                 u32 phy2;
4574
4575                 /* Select expansion interrupt status register */
4576                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4577                                  MII_TG3_DSP_EXP1_INT_STAT);
4578                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4579                 if (phy2 & 0x20) {
4580                         u32 bmcr;
4581
4582                         /* Config code words received, turn on autoneg. */
4583                         tg3_readphy(tp, MII_BMCR, &bmcr);
4584                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4585
4586                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4587
4588                 }
4589         }
4590 }
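
/* The PHY accesses above use Broadcom's select-then-read idiom: one
 * write picks which shadow or expansion register a following read of
 * the data port returns.  A minimal sketch of the pattern, using only
 * the registers seen in tg3_serdes_parallel_detect() (illustrative
 * only, not additional driver code):
 *
 *      u32 val;
 *
 *      tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);      select shadow 0x1f
 *      tg3_readphy(tp, MII_TG3_MISC_SHDW, &val);         read it back
 *
 *      tg3_writephy(tp, MII_TG3_DSP_ADDRESS, MII_TG3_DSP_EXP1_INT_STAT);
 *      tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &val);       read expansion reg
 */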
4591
4592 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4593 {
4594         u32 val;
4595         int err;
4596
4597         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4598                 err = tg3_setup_fiber_phy(tp, force_reset);
4599         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4600                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4601         else
4602                 err = tg3_setup_copper_phy(tp, force_reset);
4603
4604         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4605                 u32 scale;
4606
4607                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4608                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4609                         scale = 65;
4610                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4611                         scale = 6;
4612                 else
4613                         scale = 12;
4614
4615                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4616                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4617                 tw32(GRC_MISC_CFG, val);
4618         }
4619
4620         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4621               (6 << TX_LENGTHS_IPG_SHIFT);
4622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4623                 val |= tr32(MAC_TX_LENGTHS) &
4624                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4625                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4626
4627         if (tp->link_config.active_speed == SPEED_1000 &&
4628             tp->link_config.active_duplex == DUPLEX_HALF)
4629                 tw32(MAC_TX_LENGTHS, val |
4630                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4631         else
4632                 tw32(MAC_TX_LENGTHS, val |
4633                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4634
4635         if (!tg3_flag(tp, 5705_PLUS)) {
4636                 if (netif_carrier_ok(tp->dev)) {
4637                         tw32(HOSTCC_STAT_COAL_TICKS,
4638                              tp->coal.stats_block_coalesce_usecs);
4639                 } else {
4640                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4641                 }
4642         }
4643
4644         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4645                 val = tr32(PCIE_PWR_MGMT_THRESH);
4646                 if (!netif_carrier_ok(tp->dev))
4647                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4648                               tp->pwrmgmt_thresh;
4649                 else
4650                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4651                 tw32(PCIE_PWR_MGMT_THRESH, val);
4652         }
4653
4654         return err;
4655 }
4656
4657 static inline int tg3_irq_sync(struct tg3 *tp)
4658 {
4659         return tp->irq_sync;
4660 }
4661
4662 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4663 {
4664         int i;
4665
4666         dst = (u32 *)((u8 *)dst + off);
4667         for (i = 0; i < len; i += sizeof(u32))
4668                 *dst++ = tr32(off + i);
4669 }
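
/* tg3_rd32_loop() first advances dst by off bytes, so the dump buffer
 * stays indexed by register offset rather than being packed: after a
 * call, dst[(off + i) / sizeof(u32)] holds the register at offset
 * off + i.  A minimal usage sketch (illustrative only):
 *
 *      u32 *regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
 *
 *      tg3_rd32_loop(tp, regs, MAC_MODE, 0x10);
 *      regs[MAC_MODE / sizeof(u32)] now holds tr32(MAC_MODE)
 */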
4670
4671 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4672 {
4673         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4674         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4675         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4676         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4677         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4678         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4679         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4680         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4681         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4682         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4683         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4684         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4685         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4686         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4687         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4688         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4689         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4690         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4691         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4692
4693         if (tg3_flag(tp, SUPPORT_MSIX))
4694                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4695
4696         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4697         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4698         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4699         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4700         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4701         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4702         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4703         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4704
4705         if (!tg3_flag(tp, 5705_PLUS)) {
4706                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4707                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4708                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4709         }
4710
4711         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4712         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4713         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4714         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4715         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4716
4717         if (tg3_flag(tp, NVRAM))
4718                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4719 }
4720
4721 static void tg3_dump_state(struct tg3 *tp)
4722 {
4723         int i;
4724         u32 *regs;
4725
4726         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4727         if (!regs) {
4728                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4729                 return;
4730         }
4731
4732         if (tg3_flag(tp, PCI_EXPRESS)) {
4733                 /* Read up to but not including private PCI registers */
4734                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4735                         regs[i / sizeof(u32)] = tr32(i);
4736         } else
4737                 tg3_dump_legacy_regs(tp, regs);
4738
4739         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4740                 if (!regs[i + 0] && !regs[i + 1] &&
4741                     !regs[i + 2] && !regs[i + 3])
4742                         continue;
4743
4744                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4745                            i * 4,
4746                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4747         }
4748
4749         kfree(regs);
4750
4751         for (i = 0; i < tp->irq_cnt; i++) {
4752                 struct tg3_napi *tnapi = &tp->napi[i];
4753
4754                 /* SW status block */
4755                 netdev_err(tp->dev,
4756                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4757                            i,
4758                            tnapi->hw_status->status,
4759                            tnapi->hw_status->status_tag,
4760                            tnapi->hw_status->rx_jumbo_consumer,
4761                            tnapi->hw_status->rx_consumer,
4762                            tnapi->hw_status->rx_mini_consumer,
4763                            tnapi->hw_status->idx[0].rx_producer,
4764                            tnapi->hw_status->idx[0].tx_consumer);
4765
4766                 netdev_err(tp->dev,
4767                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4768                            i,
4769                            tnapi->last_tag, tnapi->last_irq_tag,
4770                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4771                            tnapi->rx_rcb_ptr,
4772                            tnapi->prodring.rx_std_prod_idx,
4773                            tnapi->prodring.rx_std_cons_idx,
4774                            tnapi->prodring.rx_jmb_prod_idx,
4775                            tnapi->prodring.rx_jmb_cons_idx);
4776         }
4777 }
4778
4779 /* This is called whenever we suspect that the system chipset is re-
4780  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4781  * is bogus tx completions. We try to recover by setting the
4782  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4783  * in the workqueue.
4784  */
4785 static void tg3_tx_recover(struct tg3 *tp)
4786 {
4787         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4788                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4789
4790         netdev_warn(tp->dev,
4791                     "The system may be re-ordering memory-mapped I/O "
4792                     "cycles to the network device, attempting to recover. "
4793                     "Please report the problem to the driver maintainer "
4794                     "and include system chipset information.\n");
4795
4796         spin_lock(&tp->lock);
4797         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4798         spin_unlock(&tp->lock);
4799 }
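
/* The flag set here is consumed on the NAPI path: tg3_poll_work() bails
 * out as soon as TX_RECOVERY_PENDING is set, and the poll routines then
 * schedule the reset task.  The recovery sequence, in sketch form:
 *
 *      tg3_tx() sees a bogus completion  ->  tg3_tx_recover()
 *      tg3_poll_work():    if (tg3_flag(tp, TX_RECOVERY_PENDING)) bail
 *      tg3_poll{,_msix}(): napi_complete(); schedule_work(&tp->reset_task);
 */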
4800
4801 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4802 {
4803         /* Tell compiler to fetch tx indices from memory. */
4804         barrier();
4805         return tnapi->tx_pending -
4806                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4807 }
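
/* The arithmetic above relies on TG3_TX_RING_SIZE being a power of two
 * so that the mask handles producer wrap-around.  A worked example,
 * assuming a 512-entry ring purely for illustration:
 *
 *      tx_prod = 5, tx_cons = 500, tx_pending = 511
 *      in flight = (5 - 500) & 511 = 17
 *      available = 511 - 17 = 494 descriptors
 */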
4808
4809 /* Tigon3 never reports partial packet sends.  So we do not
4810  * need special logic to handle SKBs that have not had all
4811  * of their frags sent yet, like SunGEM does.
4812  */
4813 static void tg3_tx(struct tg3_napi *tnapi)
4814 {
4815         struct tg3 *tp = tnapi->tp;
4816         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4817         u32 sw_idx = tnapi->tx_cons;
4818         struct netdev_queue *txq;
4819         int index = tnapi - tp->napi;
4820
4821         if (tg3_flag(tp, ENABLE_TSS))
4822                 index--;
4823
4824         txq = netdev_get_tx_queue(tp->dev, index);
4825
4826         while (sw_idx != hw_idx) {
4827                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4828                 struct sk_buff *skb = ri->skb;
4829                 int i, tx_bug = 0;
4830
4831                 if (unlikely(skb == NULL)) {
4832                         tg3_tx_recover(tp);
4833                         return;
4834                 }
4835
4836                 pci_unmap_single(tp->pdev,
4837                                  dma_unmap_addr(ri, mapping),
4838                                  skb_headlen(skb),
4839                                  PCI_DMA_TODEVICE);
4840
4841                 ri->skb = NULL;
4842
4843                 while (ri->fragmented) {
4844                         ri->fragmented = false;
4845                         sw_idx = NEXT_TX(sw_idx);
4846                         ri = &tnapi->tx_buffers[sw_idx];
4847                 }
4848
4849                 sw_idx = NEXT_TX(sw_idx);
4850
4851                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4852                         ri = &tnapi->tx_buffers[sw_idx];
4853                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4854                                 tx_bug = 1;
4855
4856                         pci_unmap_page(tp->pdev,
4857                                        dma_unmap_addr(ri, mapping),
4858                                        skb_shinfo(skb)->frags[i].size,
4859                                        PCI_DMA_TODEVICE);
4860
4861                         while (ri->fragmented) {
4862                                 ri->fragmented = false;
4863                                 sw_idx = NEXT_TX(sw_idx);
4864                                 ri = &tnapi->tx_buffers[sw_idx];
4865                         }
4866
4867                         sw_idx = NEXT_TX(sw_idx);
4868                 }
4869
4870                 dev_kfree_skb(skb);
4871
4872                 if (unlikely(tx_bug)) {
4873                         tg3_tx_recover(tp);
4874                         return;
4875                 }
4876         }
4877
4878         tnapi->tx_cons = sw_idx;
4879
4880         /* Need to make the tx_cons update visible to tg3_start_xmit()
4881          * before checking for netif_queue_stopped().  Without the
4882          * memory barrier, there is a small possibility that tg3_start_xmit()
4883          * will miss it and cause the queue to be stopped forever.
4884          */
4885         smp_mb();
4886
4887         if (unlikely(netif_tx_queue_stopped(txq) &&
4888                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4889                 __netif_tx_lock(txq, smp_processor_id());
4890                 if (netif_tx_queue_stopped(txq) &&
4891                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4892                         netif_tx_wake_queue(txq);
4893                 __netif_tx_unlock(txq);
4894         }
4895 }
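
/* With partial fragment unmapping, the xmit path may split a single SKB
 * fragment across several descriptors, setting ri->fragmented on every
 * entry of the split except the last.  The inner while loops above use
 * that marker to skip the continuation entries after each unmap, since
 * only the first entry of a split carries the DMA unmap address.  A
 * sketch of a three-descriptor split (illustrative only):
 *
 *      idx n   : skb/mapping set, fragmented = true   <- pci_unmap_*() here
 *      idx n+1 : fragmented = true                    <- cleared and skipped
 *      idx n+2 : fragmented = false                   <- last piece, stepped
 *                                                        over by NEXT_TX()
 */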
4896
4897 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4898 {
4899         if (!ri->skb)
4900                 return;
4901
4902         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4903                          map_sz, PCI_DMA_FROMDEVICE);
4904         dev_kfree_skb_any(ri->skb);
4905         ri->skb = NULL;
4906 }
4907
4908 /* Returns size of skb allocated or < 0 on error.
4909  *
4910  * We only need to fill in the address because the other members
4911  * of the RX descriptor are invariant, see tg3_init_rings.
4912  *
4913  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4914  * posting buffers we only dirty the first cache line of the RX
4915  * descriptor (containing the address).  Whereas for the RX status
4916  * buffers the cpu only reads the last cacheline of the RX descriptor
4917  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4918  */
4919 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4920                             u32 opaque_key, u32 dest_idx_unmasked)
4921 {
4922         struct tg3_rx_buffer_desc *desc;
4923         struct ring_info *map;
4924         struct sk_buff *skb;
4925         dma_addr_t mapping;
4926         int skb_size, dest_idx;
4927
4928         switch (opaque_key) {
4929         case RXD_OPAQUE_RING_STD:
4930                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4931                 desc = &tpr->rx_std[dest_idx];
4932                 map = &tpr->rx_std_buffers[dest_idx];
4933                 skb_size = tp->rx_pkt_map_sz;
4934                 break;
4935
4936         case RXD_OPAQUE_RING_JUMBO:
4937                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4938                 desc = &tpr->rx_jmb[dest_idx].std;
4939                 map = &tpr->rx_jmb_buffers[dest_idx];
4940                 skb_size = TG3_RX_JMB_MAP_SZ;
4941                 break;
4942
4943         default:
4944                 return -EINVAL;
4945         }
4946
4947         /* Do not overwrite any of the map or rp information
4948          * until we are sure we can commit to a new buffer.
4949          *
4950          * Callers depend upon this behavior and assume that
4951          * we leave everything unchanged if we fail.
4952          */
4953         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4954         if (skb == NULL)
4955                 return -ENOMEM;
4956
4957         skb_reserve(skb, tp->rx_offset);
4958
4959         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4960                                  PCI_DMA_FROMDEVICE);
4961         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4962                 dev_kfree_skb(skb);
4963                 return -EIO;
4964         }
4965
4966         map->skb = skb;
4967         dma_unmap_addr_set(map, mapping, mapping);
4968
4969         desc->addr_hi = ((u64)mapping >> 32);
4970         desc->addr_lo = ((u64)mapping & 0xffffffff);
4971
4972         return skb_size;
4973 }
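
/* A minimal refill sketch stitched together from the helper above and
 * the mailbox writes in tg3_rx() below (illustrative only):
 *
 *      if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, idx) < 0)
 *              recycle the old buffer instead of posting a fresh one;
 *      tpr->rx_std_prod_idx = idx & tp->rx_std_ring_mask;
 *      tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
 */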
4974
4975 /* We only need to move over in the address because the other
4976  * members of the RX descriptor are invariant.  See notes above
4977  * tg3_alloc_rx_skb for full details.
4978  */
4979 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4980                            struct tg3_rx_prodring_set *dpr,
4981                            u32 opaque_key, int src_idx,
4982                            u32 dest_idx_unmasked)
4983 {
4984         struct tg3 *tp = tnapi->tp;
4985         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4986         struct ring_info *src_map, *dest_map;
4987         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4988         int dest_idx;
4989
4990         switch (opaque_key) {
4991         case RXD_OPAQUE_RING_STD:
4992                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4993                 dest_desc = &dpr->rx_std[dest_idx];
4994                 dest_map = &dpr->rx_std_buffers[dest_idx];
4995                 src_desc = &spr->rx_std[src_idx];
4996                 src_map = &spr->rx_std_buffers[src_idx];
4997                 break;
4998
4999         case RXD_OPAQUE_RING_JUMBO:
5000                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5001                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5002                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5003                 src_desc = &spr->rx_jmb[src_idx].std;
5004                 src_map = &spr->rx_jmb_buffers[src_idx];
5005                 break;
5006
5007         default:
5008                 return;
5009         }
5010
5011         dest_map->skb = src_map->skb;
5012         dma_unmap_addr_set(dest_map, mapping,
5013                            dma_unmap_addr(src_map, mapping));
5014         dest_desc->addr_hi = src_desc->addr_hi;
5015         dest_desc->addr_lo = src_desc->addr_lo;
5016
5017         /* Ensure that the update to the skb happens after the physical
5018          * addresses have been transferred to the new BD location.
5019          */
5020         smp_wmb();
5021
5022         src_map->skb = NULL;
5023 }
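
/* The smp_wmb() above pairs with the smp_rmb() calls in
 * tg3_rx_prodring_xfer(): once a reader observes src_map->skb == NULL
 * (slot recycled away), it is guaranteed to also observe the buffer
 * addresses already copied to their new location.  In sketch form:
 *
 *      tg3_recycle_rx()                 tg3_rx_prodring_xfer()
 *      ----------------                 ----------------------
 *      copy skb + BD addresses out      if (slot->skb) stop, still live;
 *      smp_wmb();                       smp_rmb();
 *      src_map->skb = NULL;             reuse the now-empty slot
 */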
5024
5025 /* The RX ring scheme is composed of multiple rings which post fresh
5026  * buffers to the chip, and one special ring the chip uses to report
5027  * status back to the host.
5028  *
5029  * The special ring reports the status of received packets to the
5030  * host.  The chip does not write into the original descriptor the
5031  * RX buffer was obtained from.  The chip simply takes the original
5032  * descriptor as provided by the host, updates the status and length
5033  * field, then writes this into the next status ring entry.
5034  *
5035  * Each ring the host uses to post buffers to the chip is described
5036  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5037  * it is first placed into the on-chip RAM.  When the packet's length
5038  * is known, it walks down the TG3_BDINFO entries to select the ring.
5039  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5040  * which is within the range of the new packet's length is chosen.
5041  *
5042  * The "separate ring for rx status" scheme may sound queer, but it makes
5043  * sense from a cache coherency perspective.  If only the host writes
5044  * to the buffer post rings, and only the chip writes to the rx status
5045  * rings, then cache lines never move beyond shared-modified state.
5046  * If both the host and chip were to write into the same ring, cache line
5047  * eviction could occur since both entities want it in an exclusive state.
5048  */
5049 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5050 {
5051         struct tg3 *tp = tnapi->tp;
5052         u32 work_mask, rx_std_posted = 0;
5053         u32 std_prod_idx, jmb_prod_idx;
5054         u32 sw_idx = tnapi->rx_rcb_ptr;
5055         u16 hw_idx;
5056         int received;
5057         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5058
5059         hw_idx = *(tnapi->rx_rcb_prod_idx);
5060         /*
5061          * We need to order the read of hw_idx and the read of
5062          * the opaque cookie.
5063          */
5064         rmb();
5065         work_mask = 0;
5066         received = 0;
5067         std_prod_idx = tpr->rx_std_prod_idx;
5068         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5069         while (sw_idx != hw_idx && budget > 0) {
5070                 struct ring_info *ri;
5071                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5072                 unsigned int len;
5073                 struct sk_buff *skb;
5074                 dma_addr_t dma_addr;
5075                 u32 opaque_key, desc_idx, *post_ptr;
5076
5077                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5078                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5079                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5080                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5081                         dma_addr = dma_unmap_addr(ri, mapping);
5082                         skb = ri->skb;
5083                         post_ptr = &std_prod_idx;
5084                         rx_std_posted++;
5085                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5086                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5087                         dma_addr = dma_unmap_addr(ri, mapping);
5088                         skb = ri->skb;
5089                         post_ptr = &jmb_prod_idx;
5090                 } else
5091                         goto next_pkt_nopost;
5092
5093                 work_mask |= opaque_key;
5094
5095                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5096                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5097                 drop_it:
5098                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5099                                        desc_idx, *post_ptr);
5100                 drop_it_no_recycle:
5101                         /* The card keeps track of the other statistics. */
5102                         tp->rx_dropped++;
5103                         goto next_pkt;
5104                 }
5105
5106                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5107                       ETH_FCS_LEN;
5108
5109                 if (len > TG3_RX_COPY_THRESH(tp)) {
5110                         int skb_size;
5111
5112                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5113                                                     *post_ptr);
5114                         if (skb_size < 0)
5115                                 goto drop_it;
5116
5117                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5118                                          PCI_DMA_FROMDEVICE);
5119
5120                         /* Ensure that the update to the skb happens
5121                          * after the usage of the old DMA mapping.
5122                          */
5123                         smp_wmb();
5124
5125                         ri->skb = NULL;
5126
5127                         skb_put(skb, len);
5128                 } else {
5129                         struct sk_buff *copy_skb;
5130
5131                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5132                                        desc_idx, *post_ptr);
5133
5134                         copy_skb = netdev_alloc_skb(tp->dev, len +
5135                                                     TG3_RAW_IP_ALIGN);
5136                         if (copy_skb == NULL)
5137                                 goto drop_it_no_recycle;
5138
5139                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5140                         skb_put(copy_skb, len);
5141                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5142                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5143                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5144
5145                         /* We'll reuse the original ring buffer. */
5146                         skb = copy_skb;
5147                 }
5148
5149                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5150                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5151                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5152                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5153                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5154                 else
5155                         skb_checksum_none_assert(skb);
5156
5157                 skb->protocol = eth_type_trans(skb, tp->dev);
5158
5159                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5160                     skb->protocol != htons(ETH_P_8021Q)) {
5161                         dev_kfree_skb(skb);
5162                         goto drop_it_no_recycle;
5163                 }
5164
5165                 if (desc->type_flags & RXD_FLAG_VLAN &&
5166                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5167                         __vlan_hwaccel_put_tag(skb,
5168                                                desc->err_vlan & RXD_VLAN_MASK);
5169
5170                 napi_gro_receive(&tnapi->napi, skb);
5171
5172                 received++;
5173                 budget--;
5174
5175 next_pkt:
5176                 (*post_ptr)++;
5177
5178                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5179                         tpr->rx_std_prod_idx = std_prod_idx &
5180                                                tp->rx_std_ring_mask;
5181                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5182                                      tpr->rx_std_prod_idx);
5183                         work_mask &= ~RXD_OPAQUE_RING_STD;
5184                         rx_std_posted = 0;
5185                 }
5186 next_pkt_nopost:
5187                 sw_idx++;
5188                 sw_idx &= tp->rx_ret_ring_mask;
5189
5190                 /* Refresh hw_idx to see if there is new work */
5191                 if (sw_idx == hw_idx) {
5192                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5193                         rmb();
5194                 }
5195         }
5196
5197         /* ACK the status ring. */
5198         tnapi->rx_rcb_ptr = sw_idx;
5199         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5200
5201         /* Refill RX ring(s). */
5202         if (!tg3_flag(tp, ENABLE_RSS)) {
5203                 if (work_mask & RXD_OPAQUE_RING_STD) {
5204                         tpr->rx_std_prod_idx = std_prod_idx &
5205                                                tp->rx_std_ring_mask;
5206                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5207                                      tpr->rx_std_prod_idx);
5208                 }
5209                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5210                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5211                                                tp->rx_jmb_ring_mask;
5212                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5213                                      tpr->rx_jmb_prod_idx);
5214                 }
5215                 mmiowb();
5216         } else if (work_mask) {
5217                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5218                  * updated before the producer indices can be updated.
5219                  */
5220                 smp_wmb();
5221
5222                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5223                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5224
5225                 if (tnapi != &tp->napi[1])
5226                         napi_schedule(&tp->napi[1].napi);
5227         }
5228
5229         return received;
5230 }
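
/* A worked example of the opaque-cookie decode at the top of tg3_rx()
 * (values illustrative only): the chip echoes back the opaque field the
 * host placed in the posted BD, so
 *
 *      desc->opaque = RXD_OPAQUE_RING_STD | 0x42
 *      desc_idx     = desc->opaque & RXD_OPAQUE_INDEX_MASK   ->  0x42
 *      opaque_key   = desc->opaque & RXD_OPAQUE_RING_MASK    ->  std ring
 *
 * names the exact rx_std_buffers[] slot whose skb holds the packet,
 * without the chip ever writing into the producer ring itself.
 */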
5231
5232 static void tg3_poll_link(struct tg3 *tp)
5233 {
5234         /* handle link change and other phy events */
5235         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5236                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5237
5238                 if (sblk->status & SD_STATUS_LINK_CHG) {
5239                         sblk->status = SD_STATUS_UPDATED |
5240                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5241                         spin_lock(&tp->lock);
5242                         if (tg3_flag(tp, USE_PHYLIB)) {
5243                                 tw32_f(MAC_STATUS,
5244                                      (MAC_STATUS_SYNC_CHANGED |
5245                                       MAC_STATUS_CFG_CHANGED |
5246                                       MAC_STATUS_MI_COMPLETION |
5247                                       MAC_STATUS_LNKSTATE_CHANGED));
5248                                 udelay(40);
5249                         } else
5250                                 tg3_setup_phy(tp, 0);
5251                         spin_unlock(&tp->lock);
5252                 }
5253         }
5254 }
5255
5256 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5257                                 struct tg3_rx_prodring_set *dpr,
5258                                 struct tg3_rx_prodring_set *spr)
5259 {
5260         u32 si, di, cpycnt, src_prod_idx;
5261         int i, err = 0;
5262
5263         while (1) {
5264                 src_prod_idx = spr->rx_std_prod_idx;
5265
5266                 /* Make sure updates to the rx_std_buffers[] entries and the
5267                  * standard producer index are seen in the correct order.
5268                  */
5269                 smp_rmb();
5270
5271                 if (spr->rx_std_cons_idx == src_prod_idx)
5272                         break;
5273
5274                 if (spr->rx_std_cons_idx < src_prod_idx)
5275                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5276                 else
5277                         cpycnt = tp->rx_std_ring_mask + 1 -
5278                                  spr->rx_std_cons_idx;
5279
5280                 cpycnt = min(cpycnt,
5281                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5282
5283                 si = spr->rx_std_cons_idx;
5284                 di = dpr->rx_std_prod_idx;
5285
5286                 for (i = di; i < di + cpycnt; i++) {
5287                         if (dpr->rx_std_buffers[i].skb) {
5288                                 cpycnt = i - di;
5289                                 err = -ENOSPC;
5290                                 break;
5291                         }
5292                 }
5293
5294                 if (!cpycnt)
5295                         break;
5296
5297                 /* Ensure that updates to the rx_std_buffers ring and the
5298                  * shadowed hardware producer ring from tg3_recycle_skb() are
5299                  * ordered correctly WRT the skb check above.
5300                  */
5301                 smp_rmb();
5302
5303                 memcpy(&dpr->rx_std_buffers[di],
5304                        &spr->rx_std_buffers[si],
5305                        cpycnt * sizeof(struct ring_info));
5306
5307                 for (i = 0; i < cpycnt; i++, di++, si++) {
5308                         struct tg3_rx_buffer_desc *sbd, *dbd;
5309                         sbd = &spr->rx_std[si];
5310                         dbd = &dpr->rx_std[di];
5311                         dbd->addr_hi = sbd->addr_hi;
5312                         dbd->addr_lo = sbd->addr_lo;
5313                 }
5314
5315                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5316                                        tp->rx_std_ring_mask;
5317                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5318                                        tp->rx_std_ring_mask;
5319         }
5320
5321         while (1) {
5322                 src_prod_idx = spr->rx_jmb_prod_idx;
5323
5324                 /* Make sure updates to the rx_jmb_buffers[] entries and
5325                  * the jumbo producer index are seen in the correct order.
5326                  */
5327                 smp_rmb();
5328
5329                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5330                         break;
5331
5332                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5333                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5334                 else
5335                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5336                                  spr->rx_jmb_cons_idx;
5337
5338                 cpycnt = min(cpycnt,
5339                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5340
5341                 si = spr->rx_jmb_cons_idx;
5342                 di = dpr->rx_jmb_prod_idx;
5343
5344                 for (i = di; i < di + cpycnt; i++) {
5345                         if (dpr->rx_jmb_buffers[i].skb) {
5346                                 cpycnt = i - di;
5347                                 err = -ENOSPC;
5348                                 break;
5349                         }
5350                 }
5351
5352                 if (!cpycnt)
5353                         break;
5354
5355                 /* Ensure that updates to the rx_jmb_buffers ring and the
5356                  * shadowed hardware producer ring from tg3_recycle_skb() are
5357                  * ordered correctly WRT the skb check above.
5358                  */
5359                 smp_rmb();
5360
5361                 memcpy(&dpr->rx_jmb_buffers[di],
5362                        &spr->rx_jmb_buffers[si],
5363                        cpycnt * sizeof(struct ring_info));
5364
5365                 for (i = 0; i < cpycnt; i++, di++, si++) {
5366                         struct tg3_rx_buffer_desc *sbd, *dbd;
5367                         sbd = &spr->rx_jmb[si].std;
5368                         dbd = &dpr->rx_jmb[di].std;
5369                         dbd->addr_hi = sbd->addr_hi;
5370                         dbd->addr_lo = sbd->addr_lo;
5371                 }
5372
5373                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5374                                        tp->rx_jmb_ring_mask;
5375                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5376                                        tp->rx_jmb_ring_mask;
5377         }
5378
5379         return err;
5380 }
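
/* The cpycnt computation above only copies up to the ring wrap point in
 * a single pass; a wrapped batch is finished by the next loop iteration.
 * A worked example, assuming a 512-entry ring (mask 511) purely for
 * illustration:
 *
 *      cons_idx = 500, prod_idx = 20        (producer has wrapped)
 *      pass 1:  cpycnt = 512 - 500 = 12     (slots 500..511)
 *      pass 2:  cpycnt = 20 - 0    = 20     (slots 0..19)
 *
 * with each pass additionally clamped by the space left ahead of the
 * destination ring's producer index.
 */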
5381
5382 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5383 {
5384         struct tg3 *tp = tnapi->tp;
5385
5386         /* run TX completion thread */
5387         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5388                 tg3_tx(tnapi);
5389                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5390                         return work_done;
5391         }
5392
5393         /* run RX thread, within the bounds set by NAPI.
5394          * All RX "locking" is done by ensuring outside
5395          * code synchronizes with tg3->napi.poll()
5396          */
5397         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5398                 work_done += tg3_rx(tnapi, budget - work_done);
5399
5400         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5401                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5402                 int i, err = 0;
5403                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5404                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5405
5406                 for (i = 1; i < tp->irq_cnt; i++)
5407                         err |= tg3_rx_prodring_xfer(tp, dpr,
5408                                                     &tp->napi[i].prodring);
5409
5410                 wmb();
5411
5412                 if (std_prod_idx != dpr->rx_std_prod_idx)
5413                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5414                                      dpr->rx_std_prod_idx);
5415
5416                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5417                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5418                                      dpr->rx_jmb_prod_idx);
5419
5420                 mmiowb();
5421
5422                 if (err)
5423                         tw32_f(HOSTCC_MODE, tp->coal_now);
5424         }
5425
5426         return work_done;
5427 }
5428
5429 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5430 {
5431         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5432         struct tg3 *tp = tnapi->tp;
5433         int work_done = 0;
5434         struct tg3_hw_status *sblk = tnapi->hw_status;
5435
5436         while (1) {
5437                 work_done = tg3_poll_work(tnapi, work_done, budget);
5438
5439                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5440                         goto tx_recovery;
5441
5442                 if (unlikely(work_done >= budget))
5443                         break;
5444
5445                 /* tnapi->last_tag is written to the interrupt mailbox
5446                  * below to tell the hw how much work has been processed,
5447                  * so we must read it before checking for more work.
5448                  */
5449                 tnapi->last_tag = sblk->status_tag;
5450                 tnapi->last_irq_tag = tnapi->last_tag;
5451                 rmb();
5452
5453                 /* check for RX/TX work to do */
5454                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5455                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5456                         napi_complete(napi);
5457                         /* Reenable interrupts. */
5458                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5459                         mmiowb();
5460                         break;
5461                 }
5462         }
5463
5464         return work_done;
5465
5466 tx_recovery:
5467         /* work_done is guaranteed to be less than budget. */
5468         napi_complete(napi);
5469         schedule_work(&tp->reset_task);
5470         return work_done;
5471 }
5472
5473 static void tg3_process_error(struct tg3 *tp)
5474 {
5475         u32 val;
5476         bool real_error = false;
5477
5478         if (tg3_flag(tp, ERROR_PROCESSED))
5479                 return;
5480
5481         /* Check Flow Attention register */
5482         val = tr32(HOSTCC_FLOW_ATTN);
5483         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5484                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5485                 real_error = true;
5486         }
5487
5488         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5489                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5490                 real_error = true;
5491         }
5492
5493         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5494                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5495                 real_error = true;
5496         }
5497
5498         if (!real_error)
5499                 return;
5500
5501         tg3_dump_state(tp);
5502
5503         tg3_flag_set(tp, ERROR_PROCESSED);
5504         schedule_work(&tp->reset_task);
5505 }
5506
5507 static int tg3_poll(struct napi_struct *napi, int budget)
5508 {
5509         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5510         struct tg3 *tp = tnapi->tp;
5511         int work_done = 0;
5512         struct tg3_hw_status *sblk = tnapi->hw_status;
5513
5514         while (1) {
5515                 if (sblk->status & SD_STATUS_ERROR)
5516                         tg3_process_error(tp);
5517
5518                 tg3_poll_link(tp);
5519
5520                 work_done = tg3_poll_work(tnapi, work_done, budget);
5521
5522                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5523                         goto tx_recovery;
5524
5525                 if (unlikely(work_done >= budget))
5526                         break;
5527
5528                 if (tg3_flag(tp, TAGGED_STATUS)) {
5529                         /* tnapi->last_tag is used in tg3_int_reenable() below
5530                          * to tell the hw how much work has been processed,
5531                          * so we must read it before checking for more work.
5532                          */
5533                         tnapi->last_tag = sblk->status_tag;
5534                         tnapi->last_irq_tag = tnapi->last_tag;
5535                         rmb();
5536                 } else
5537                         sblk->status &= ~SD_STATUS_UPDATED;
5538
5539                 if (likely(!tg3_has_work(tnapi))) {
5540                         napi_complete(napi);
5541                         tg3_int_reenable(tnapi);
5542                         break;
5543                 }
5544         }
5545
5546         return work_done;
5547
5548 tx_recovery:
5549         /* work_done is guaranteed to be less than budget. */
5550         napi_complete(napi);
5551         schedule_work(&tp->reset_task);
5552         return work_done;
5553 }
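
/* With TAGGED_STATUS, the hardware stamps each status block update with
 * a tag, and writing that tag back to the interrupt mailbox both
 * re-enables the interrupt and tells the chip how much work has been
 * processed.  A sketch of the ack, matching the MSI-X path above
 * (tg3_int_reenable() performs the equivalent write for this path):
 *
 *      tnapi->last_tag = sblk->status_tag;
 *      rmb();
 *      ... process tx/rx work ...
 *      tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
 */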
5554
5555 static void tg3_napi_disable(struct tg3 *tp)
5556 {
5557         int i;
5558
5559         for (i = tp->irq_cnt - 1; i >= 0; i--)
5560                 napi_disable(&tp->napi[i].napi);
5561 }
5562
5563 static void tg3_napi_enable(struct tg3 *tp)
5564 {
5565         int i;
5566
5567         for (i = 0; i < tp->irq_cnt; i++)
5568                 napi_enable(&tp->napi[i].napi);
5569 }
5570
5571 static void tg3_napi_init(struct tg3 *tp)
5572 {
5573         int i;
5574
5575         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5576         for (i = 1; i < tp->irq_cnt; i++)
5577                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5578 }
5579
5580 static void tg3_napi_fini(struct tg3 *tp)
5581 {
5582         int i;
5583
5584         for (i = 0; i < tp->irq_cnt; i++)
5585                 netif_napi_del(&tp->napi[i].napi);
5586 }
5587
5588 static inline void tg3_netif_stop(struct tg3 *tp)
5589 {
5590         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5591         tg3_napi_disable(tp);
5592         netif_tx_disable(tp->dev);
5593 }
5594
5595 static inline void tg3_netif_start(struct tg3 *tp)
5596 {
5597         /* NOTE: unconditional netif_tx_wake_all_queues is only
5598          * appropriate so long as all callers are assured to
5599          * have free tx slots (such as after tg3_init_hw)
5600          */
5601         netif_tx_wake_all_queues(tp->dev);
5602
5603         tg3_napi_enable(tp);
5604         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5605         tg3_enable_ints(tp);
5606 }
5607
5608 static void tg3_irq_quiesce(struct tg3 *tp)
5609 {
5610         int i;
5611
5612         BUG_ON(tp->irq_sync);
5613
5614         tp->irq_sync = 1;
5615         smp_mb();
5616
5617         for (i = 0; i < tp->irq_cnt; i++)
5618                 synchronize_irq(tp->napi[i].irq_vec);
5619 }
5620
5621 /* Fully shut down all tg3 driver activity elsewhere in the system.
5622  * If irq_sync is non-zero, any in-flight IRQ handlers are waited for
5623  * (synchronized with) as well.  Most of the time this is only
5624  * necessary when shutting down the device.
5625  */
5626 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5627 {
5628         spin_lock_bh(&tp->lock);
5629         if (irq_sync)
5630                 tg3_irq_quiesce(tp);
5631 }
5632
5633 static inline void tg3_full_unlock(struct tg3 *tp)
5634 {
5635         spin_unlock_bh(&tp->lock);
5636 }
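
/* Editor's note: a typical usage sketch for the locking helpers above,
 * mirroring what tg3_change_mtu() further below actually does:
 *
 *      tg3_full_lock(tp, 1);   // irq_sync=1 also quiesces IRQ handlers
 *      ... halt, reprogram and restart the hardware ...
 *      tg3_full_unlock(tp);
 */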
5637
5638 /* One-shot MSI handler - the chip automatically disables the interrupt
5639  * after sending the MSI, so the driver doesn't have to.
5640  */
5641 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5642 {
5643         struct tg3_napi *tnapi = dev_id;
5644         struct tg3 *tp = tnapi->tp;
5645
5646         prefetch(tnapi->hw_status);
5647         if (tnapi->rx_rcb)
5648                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5649
5650         if (likely(!tg3_irq_sync(tp)))
5651                 napi_schedule(&tnapi->napi);
5652
5653         return IRQ_HANDLED;
5654 }
5655
5656 /* MSI ISR - No need to check for interrupt sharing and no need to
5657  * flush the status block and interrupt mailbox. PCI ordering rules
5658  * guarantee that MSI will arrive after the status block.
5659  */
5660 static irqreturn_t tg3_msi(int irq, void *dev_id)
5661 {
5662         struct tg3_napi *tnapi = dev_id;
5663         struct tg3 *tp = tnapi->tp;
5664
5665         prefetch(tnapi->hw_status);
5666         if (tnapi->rx_rcb)
5667                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5668         /*
5669          * Writing any value to intr-mbox-0 clears PCI INTA# and
5670          * chip-internal interrupt pending events.
5671          * Writing non-zero to intr-mbox-0 additionally tells the
5672          * NIC to stop sending us irqs, engaging "in-intr-handler"
5673          * event coalescing.
5674          */
5675         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5676         if (likely(!tg3_irq_sync(tp)))
5677                 napi_schedule(&tnapi->napi);
5678
5679         return IRQ_RETVAL(1);
5680 }
5681
5682 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5683 {
5684         struct tg3_napi *tnapi = dev_id;
5685         struct tg3 *tp = tnapi->tp;
5686         struct tg3_hw_status *sblk = tnapi->hw_status;
5687         unsigned int handled = 1;
5688
5689         /* In INTx mode, it is possible for the interrupt to arrive at
5690          * the CPU before the status block that was posted prior to it.
5691          * Reading the PCI State register will confirm whether the
5692          * interrupt is ours and will flush the status block.
5693          */
5694         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5695                 if (tg3_flag(tp, CHIP_RESETTING) ||
5696                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5697                         handled = 0;
5698                         goto out;
5699                 }
5700         }
5701
5702         /*
5703          * Writing any value to intr-mbox-0 clears PCI INTA# and
5704          * chip-internal interrupt pending events.
5705          * Writing non-zero to intr-mbox-0 additionally tells the
5706          * NIC to stop sending us irqs, engaging "in-intr-handler"
5707          * event coalescing.
5708          *
5709          * Flush the mailbox to de-assert the IRQ immediately to prevent
5710          * spurious interrupts.  The flush impacts performance but
5711          * excessive spurious interrupts can be worse in some cases.
5712          */
5713         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5714         if (tg3_irq_sync(tp))
5715                 goto out;
5716         sblk->status &= ~SD_STATUS_UPDATED;
5717         if (likely(tg3_has_work(tnapi))) {
5718                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5719                 napi_schedule(&tnapi->napi);
5720         } else {
5721                 /* No work; shared interrupt perhaps?  Re-enable
5722                  * interrupts and flush that PCI write.
5723                  */
5724                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5725                                0x00000000);
5726         }
5727 out:
5728         return IRQ_RETVAL(handled);
5729 }
5730
5731 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5732 {
5733         struct tg3_napi *tnapi = dev_id;
5734         struct tg3 *tp = tnapi->tp;
5735         struct tg3_hw_status *sblk = tnapi->hw_status;
5736         unsigned int handled = 1;
5737
5738         /* In INTx mode, it is possible for the interrupt to arrive at
5739          * the CPU before the status block posted prior to the interrupt.
5740          * the CPU before the status block that was posted prior to it.
5741          * interrupt is ours and will flush the status block.
5742          */
5743         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5744                 if (tg3_flag(tp, CHIP_RESETTING) ||
5745                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5746                         handled = 0;
5747                         goto out;
5748                 }
5749         }
5750
5751         /*
5752          * Writing any value to intr-mbox-0 clears PCI INTA# and
5753          * chip-internal interrupt pending events.
5754          * Writing non-zero to intr-mbox-0 additionally tells the
5755          * NIC to stop sending us irqs, engaging "in-intr-handler"
5756          * event coalescing.
5757          *
5758          * Flush the mailbox to de-assert the IRQ immediately to prevent
5759          * spurious interrupts.  The flush impacts performance but
5760          * excessive spurious interrupts can be worse in some cases.
5761          */
5762         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5763
5764         /*
5765          * In a shared interrupt configuration, sometimes other devices'
5766          * interrupts will scream.  We record the current status tag here
5767          * so that the above check can report that the screaming interrupts
5768          * are unhandled.  Eventually they will be silenced.
5769          */
5770         tnapi->last_irq_tag = sblk->status_tag;
5771
5772         if (tg3_irq_sync(tp))
5773                 goto out;
5774
5775         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5776
5777         napi_schedule(&tnapi->napi);
5778
5779 out:
5780         return IRQ_RETVAL(handled);
5781 }
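
/* Editor's note: both INTx handlers above are instances of the usual
 * shared-interrupt pattern.  A minimal sketch (my_* names are
 * hypothetical):
 *
 *      static irqreturn_t my_isr(int irq, void *dev_id)
 *      {
 *              struct my_napi *mn = dev_id;
 *
 *              if (!my_irq_is_ours(mn))
 *                      return IRQ_NONE;        // let other sharers run
 *
 *              my_ack_and_mask_irq(mn);        // the mailbox write above
 *              napi_schedule(&mn->napi);       // defer real work to poll
 *              return IRQ_HANDLED;
 *      }
 *
 * tg3 returns IRQ_RETVAL(handled) so that the "not ours" case in the
 * screaming-interrupt check maps to IRQ_NONE.
 */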
5782
5783 /* ISR for interrupt test */
5784 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5785 {
5786         struct tg3_napi *tnapi = dev_id;
5787         struct tg3 *tp = tnapi->tp;
5788         struct tg3_hw_status *sblk = tnapi->hw_status;
5789
5790         if ((sblk->status & SD_STATUS_UPDATED) ||
5791             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5792                 tg3_disable_ints(tp);
5793                 return IRQ_RETVAL(1);
5794         }
5795         return IRQ_RETVAL(0);
5796 }
5797
5798 static int tg3_init_hw(struct tg3 *, int);
5799 static int tg3_halt(struct tg3 *, int, int);
5800
5801 /* Restart hardware after configuration changes, self-test, etc.
5802  * Invoked with tp->lock held.
5803  */
5804 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5805         __releases(tp->lock)
5806         __acquires(tp->lock)
5807 {
5808         int err;
5809
5810         err = tg3_init_hw(tp, reset_phy);
5811         if (err) {
5812                 netdev_err(tp->dev,
5813                            "Failed to re-initialize device, aborting\n");
5814                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5815                 tg3_full_unlock(tp);
5816                 del_timer_sync(&tp->timer);
5817                 tp->irq_sync = 0;
5818                 tg3_napi_enable(tp);
5819                 dev_close(tp->dev);
5820                 tg3_full_lock(tp, 0);
5821         }
5822         return err;
5823 }
5824
5825 #ifdef CONFIG_NET_POLL_CONTROLLER
5826 static void tg3_poll_controller(struct net_device *dev)
5827 {
5828         int i;
5829         struct tg3 *tp = netdev_priv(dev);
5830
5831         for (i = 0; i < tp->irq_cnt; i++)
5832                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5833 }
5834 #endif
5835
5836 static void tg3_reset_task(struct work_struct *work)
5837 {
5838         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5839         int err;
5840         unsigned int restart_timer;
5841
5842         tg3_full_lock(tp, 0);
5843
5844         if (!netif_running(tp->dev)) {
5845                 tg3_full_unlock(tp);
5846                 return;
5847         }
5848
5849         tg3_full_unlock(tp);
5850
5851         tg3_phy_stop(tp);
5852
5853         tg3_netif_stop(tp);
5854
5855         tg3_full_lock(tp, 1);
5856
5857         restart_timer = tg3_flag(tp, RESTART_TIMER);
5858         tg3_flag_clear(tp, RESTART_TIMER);
5859
5860         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5861                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5862                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5863                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5864                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5865         }
5866
5867         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5868         err = tg3_init_hw(tp, 1);
5869         if (err)
5870                 goto out;
5871
5872         tg3_netif_start(tp);
5873
5874         if (restart_timer)
5875                 mod_timer(&tp->timer, jiffies + 1);
5876
5877 out:
5878         tg3_full_unlock(tp);
5879
5880         if (!err)
5881                 tg3_phy_start(tp);
5882 }
5883
5884 static void tg3_tx_timeout(struct net_device *dev)
5885 {
5886         struct tg3 *tp = netdev_priv(dev);
5887
5888         if (netif_msg_tx_err(tp)) {
5889                 netdev_err(dev, "transmit timed out, resetting\n");
5890                 tg3_dump_state(tp);
5891         }
5892
5893         schedule_work(&tp->reset_task);
5894 }
5895
5896 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
5897 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5898 {
5899         u32 base = (u32) mapping & 0xffffffff;
5900
5901         return (base > 0xffffdcc0) && (base + len + 8 < base);
5902 }
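
/* Editor's note: a worked example of the wrap-around trick above.  The
 * low 32 bits of (base + len + 8) wrap past zero exactly when the
 * buffer (plus 8 bytes of slack) straddles a 4GB boundary, and the
 * base > 0xffffdcc0 guard cheaply filters out bases too low to wrap:
 *
 *      base = 0xfffffff0, len = 0x100:
 *              base + len + 8 = 0x1000000f8 -> 0x000000f8 as a u32,
 *              which is < base, so the test reports an overflow.
 *      base = 0x10000000, len = 0x100:
 *              0x10000108 does not wrap -> no overflow.
 */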
5903
5904 /* Test for DMA addresses > 40-bit */
5905 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5906                                           int len)
5907 {
5908 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5909         if (tg3_flag(tp, 40BIT_DMA_BUG))
5910                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5911         return 0;
5912 #else
5913         return 0;
5914 #endif
5915 }
5916
5917 static inline void tg3_tx_set_bd(struct tg3_napi *tnapi, u32 entry,
5918                                  dma_addr_t mapping, u32 len, u32 flags,
5919                                  u32 mss, u32 vlan)
5920 {
5921         struct tg3_tx_buffer_desc *txbd = &tnapi->tx_ring[entry];
5922
5923         txbd->addr_hi = ((u64) mapping >> 32);
5924         txbd->addr_lo = ((u64) mapping & 0xffffffff);
5925         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
5926         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
5927 }
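
/* Editor's note: a worked example of the BD packing above, assuming a
 * (hypothetical) 64-bit DMA address 0x0000000123456789 and len 0x40:
 *
 *      addr_hi   = 0x00000001;                 // mapping >> 32
 *      addr_lo   = 0x23456789;                 // mapping & 0xffffffff
 *      len_flags = (0x40 << TXD_LEN_SHIFT) | (flags & 0xffff);
 *      vlan_tag  = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
 *
 * i.e. one 16-byte descriptor carries the split address plus the packed
 * length/flags and mss/vlan words.
 */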
5928
5929 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
5930 {
5931         int i;
5932         struct sk_buff *skb;
5933         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
5934
5935         skb = txb->skb;
5936         txb->skb = NULL;
5937
5938         pci_unmap_single(tnapi->tp->pdev,
5939                          dma_unmap_addr(txb, mapping),
5940                          skb_headlen(skb),
5941                          PCI_DMA_TODEVICE);
5942
5943         while (txb->fragmented) {
5944                 txb->fragmented = false;
5945                 entry = NEXT_TX(entry);
5946                 txb = &tnapi->tx_buffers[entry];
5947         }
5948
5949         for (i = 0; i < last; i++) {
5950                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5951
5952                 entry = NEXT_TX(entry);
5953                 txb = &tnapi->tx_buffers[entry];
5954
5955                 pci_unmap_page(tnapi->tp->pdev,
5956                                dma_unmap_addr(txb, mapping),
5957                                frag->size, PCI_DMA_TODEVICE);
5958
5959                 while (txb->fragmented) {
5960                         txb->fragmented = false;
5961                         entry = NEXT_TX(entry);
5962                         txb = &tnapi->tx_buffers[entry];
5963                 }
5964         }
5965 }
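
/* Editor's note: the while (txb->fragmented) walks above are what make
 * this helper cope with partially mapped packets: as the loops suggest,
 * a mapping that had to be split occupies extra ring entries flagged
 * ->fragmented, which carry no mapping of their own, so the unmapper
 * clears the flag and steps past them before the next pci_unmap_*()
 * call.  NEXT_TX() is the usual power-of-two ring advance; a standalone
 * model of that arithmetic:
 *
 *      #define RING_SIZE 512                   // must be a power of two
 *      #define NEXT(n)   (((n) + 1) & (RING_SIZE - 1))
 */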
5966
5967 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5968 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5969                                        struct sk_buff *skb,
5970                                        u32 base_flags, u32 mss, u32 vlan)
5971 {
5972         struct tg3 *tp = tnapi->tp;
5973         struct sk_buff *new_skb;
5974         dma_addr_t new_addr = 0;
5975         u32 entry = tnapi->tx_prod;
5976         int ret = 0;
5977
5978         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5979                 new_skb = skb_copy(skb, GFP_ATOMIC);
5980         else {
5981                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5982
5983                 new_skb = skb_copy_expand(skb,
5984                                           skb_headroom(skb) + more_headroom,
5985                                           skb_tailroom(skb), GFP_ATOMIC);
5986         }
5987
5988         if (!new_skb) {
5989                 ret = -1;
5990         } else {
5991                 /* New SKB is guaranteed to be linear. */
5992                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5993                                           PCI_DMA_TODEVICE);
5994                 /* Make sure the mapping succeeded */
5995                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5996                         ret = -1;
5997                         dev_kfree_skb(new_skb);
5998
5999                 /* Make sure new skb does not cross any 4G boundaries.
6000                  * Drop the packet if it does.
6001                  */
6002                 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
6003                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
6004                                          PCI_DMA_TODEVICE);
6005                         ret = -1;
6006                         dev_kfree_skb(new_skb);
6007                 } else {
6008                         base_flags |= TXD_FLAG_END;
6009
6010                         tnapi->tx_buffers[entry].skb = new_skb;
6011                         dma_unmap_addr_set(&tnapi->tx_buffers[entry],
6012                                            mapping, new_addr);
6013
6014                         tg3_tx_set_bd(tnapi, entry, new_addr, new_skb->len,
6015                                       base_flags, mss, vlan);
6016                 }
6017         }
6018
6019         dev_kfree_skb(skb);
6020
6021         return ret;
6022 }
6023
6024 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6025
6026 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6027  * TSO header is greater than 80 bytes.
6028  */
6029 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6030 {
6031         struct sk_buff *segs, *nskb;
6032         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6033
6034         /* Estimate the number of fragments in the worst case */
6035         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6036                 netif_stop_queue(tp->dev);
6037
6038                 /* netif_stop_queue() must be done before checking
6039                  * the tx index in tg3_tx_avail() below, because in
6040                  * tg3_tx(), we update tx index before checking for
6041                  * netif_tx_queue_stopped().
6042                  */
6043                 smp_mb();
6044                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6045                         return NETDEV_TX_BUSY;
6046
6047                 netif_wake_queue(tp->dev);
6048         }
6049
6050         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6051         if (IS_ERR(segs))
6052                 goto tg3_tso_bug_end;
6053
6054         do {
6055                 nskb = segs;
6056                 segs = segs->next;
6057                 nskb->next = NULL;
6058                 tg3_start_xmit(nskb, tp->dev);
6059         } while (segs);
6060
6061 tg3_tso_bug_end:
6062         dev_kfree_skb(skb);
6063
6064         return NETDEV_TX_OK;
6065 }
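
/* Editor's note: the segment loop above is the standard way to consume
 * the singly linked list that skb_gso_segment() returns: detach the
 * head, clear its ->next so each segment travels as an independent skb,
 * then transmit it.  The same shape as a while loop:
 *
 *      while (segs) {
 *              nskb = segs;
 *              segs = segs->next;
 *              nskb->next = NULL;
 *              transmit(nskb);         // tg3_start_xmit() above
 *      }
 */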
6066
6067 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6068  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6069  */
6070 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6071 {
6072         struct tg3 *tp = netdev_priv(dev);
6073         u32 len, entry, base_flags, mss, vlan = 0;
6074         int i = -1, would_hit_hwbug;
6075         dma_addr_t mapping;
6076         struct tg3_napi *tnapi;
6077         struct netdev_queue *txq;
6078         unsigned int last;
6079
6080         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6081         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6082         if (tg3_flag(tp, ENABLE_TSS))
6083                 tnapi++;
6084
6085         /* We are running in BH disabled context with netif_tx_lock
6086          * and TX reclaim runs via tp->napi.poll inside of a software
6087          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6088          * no IRQ context deadlocks to worry about either.  Rejoice!
6089          */
6090         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
6091                 if (!netif_tx_queue_stopped(txq)) {
6092                         netif_tx_stop_queue(txq);
6093
6094                         /* This is a hard error, log it. */
6095                         netdev_err(dev,
6096                                    "BUG! Tx Ring full when queue awake!\n");
6097                 }
6098                 return NETDEV_TX_BUSY;
6099         }
6100
6101         entry = tnapi->tx_prod;
6102         base_flags = 0;
6103         if (skb->ip_summed == CHECKSUM_PARTIAL)
6104                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6105
6106         mss = skb_shinfo(skb)->gso_size;
6107         if (mss) {
6108                 struct iphdr *iph;
6109                 u32 tcp_opt_len, hdr_len;
6110
6111                 if (skb_header_cloned(skb) &&
6112                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6113                         dev_kfree_skb(skb);
6114                         goto out_unlock;
6115                 }
6116
6117                 iph = ip_hdr(skb);
6118                 tcp_opt_len = tcp_optlen(skb);
6119
6120                 if (skb_is_gso_v6(skb)) {
6121                         hdr_len = skb_headlen(skb) - ETH_HLEN;
6122                 } else {
6123                         u32 ip_tcp_len;
6124
6125                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6126                         hdr_len = ip_tcp_len + tcp_opt_len;
6127
6128                         iph->check = 0;
6129                         iph->tot_len = htons(mss + hdr_len);
6130                 }
6131
6132                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6133                     tg3_flag(tp, TSO_BUG))
6134                         return tg3_tso_bug(tp, skb);
6135
6136                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6137                                TXD_FLAG_CPU_POST_DMA);
6138
6139                 if (tg3_flag(tp, HW_TSO_1) ||
6140                     tg3_flag(tp, HW_TSO_2) ||
6141                     tg3_flag(tp, HW_TSO_3)) {
6142                         tcp_hdr(skb)->check = 0;
6143                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6144                 } else
6145                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6146                                                                  iph->daddr, 0,
6147                                                                  IPPROTO_TCP,
6148                                                                  0);
6149
6150                 if (tg3_flag(tp, HW_TSO_3)) {
6151                         mss |= (hdr_len & 0xc) << 12;
6152                         if (hdr_len & 0x10)
6153                                 base_flags |= 0x00000010;
6154                         base_flags |= (hdr_len & 0x3e0) << 5;
6155                 } else if (tg3_flag(tp, HW_TSO_2))
6156                         mss |= hdr_len << 9;
6157                 else if (tg3_flag(tp, HW_TSO_1) ||
6158                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6159                         if (tcp_opt_len || iph->ihl > 5) {
6160                                 int tsflags;
6161
6162                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6163                                 mss |= (tsflags << 11);
6164                         }
6165                 } else {
6166                         if (tcp_opt_len || iph->ihl > 5) {
6167                                 int tsflags;
6168
6169                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6170                                 base_flags |= tsflags << 12;
6171                         }
6172                 }
6173         }
6174
6175 #ifdef BCM_KERNEL_SUPPORTS_8021Q
6176         if (vlan_tx_tag_present(skb)) {
6177                 base_flags |= TXD_FLAG_VLAN;
6178                 vlan = vlan_tx_tag_get(skb);
6179         }
6180 #endif
6181
6182         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6183             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6184                 base_flags |= TXD_FLAG_JMB_PKT;
6185
6186         len = skb_headlen(skb);
6187
6188         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6189         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6190                 dev_kfree_skb(skb);
6191                 goto out_unlock;
6192         }
6193
6194         tnapi->tx_buffers[entry].skb = skb;
6195         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6196
6197         would_hit_hwbug = 0;
6198
6199         if (tg3_4g_overflow_test(mapping, len))
6200                 would_hit_hwbug = 1;
6201
6202         if (tg3_40bit_overflow_test(tp, mapping, len))
6203                 would_hit_hwbug = 1;
6204
6205         if (tg3_flag(tp, 5701_DMA_BUG))
6206                 would_hit_hwbug = 1;
6207
6208         tg3_tx_set_bd(tnapi, entry, mapping, len, base_flags |
6209                       ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6210                       mss, vlan);
6211
6212         entry = NEXT_TX(entry);
6213
6214         /* Now loop through additional data fragments, and queue them. */
6215         if (skb_shinfo(skb)->nr_frags > 0) {
6216                 u32 tmp_mss = mss;
6217
6218                 if (!tg3_flag(tp, HW_TSO_1) &&
6219                     !tg3_flag(tp, HW_TSO_2) &&
6220                     !tg3_flag(tp, HW_TSO_3))
6221                         tmp_mss = 0;
6222
6223                 last = skb_shinfo(skb)->nr_frags - 1;
6224                 for (i = 0; i <= last; i++) {
6225                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6226
6227                         len = frag->size;
6228                         mapping = pci_map_page(tp->pdev,
6229                                                frag->page,
6230                                                frag->page_offset,
6231                                                len, PCI_DMA_TODEVICE);
6232
6233                         tnapi->tx_buffers[entry].skb = NULL;
6234                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6235                                            mapping);
6236                         if (pci_dma_mapping_error(tp->pdev, mapping))
6237                                 goto dma_error;
6238
6239                         if (tg3_flag(tp, SHORT_DMA_BUG) &&
6240                             len <= 8)
6241                                 would_hit_hwbug = 1;
6242
6243                         if (tg3_4g_overflow_test(mapping, len))
6244                                 would_hit_hwbug = 1;
6245
6246                         if (tg3_40bit_overflow_test(tp, mapping, len))
6247                                 would_hit_hwbug = 1;
6248
6249                         tg3_tx_set_bd(tnapi, entry, mapping, len, base_flags |
6250                                       ((i == last) ? TXD_FLAG_END : 0),
6251                                       tmp_mss, vlan);
6252
6253                         entry = NEXT_TX(entry);
6254                 }
6255         }
6256
6257         if (would_hit_hwbug) {
6258                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6259
6260                 /* If the workaround fails due to memory/mapping
6261                  * failure, silently drop this packet.
6262                  */
6263                 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags,
6264                                                 mss, vlan))
6265                         goto out_unlock;
6266
6267                 entry = NEXT_TX(tnapi->tx_prod);
6268         }
6269
6270         skb_tx_timestamp(skb);
6271
6272         /* Packets are ready, update Tx producer idx local and on card. */
6273         tw32_tx_mbox(tnapi->prodmbox, entry);
6274
6275         tnapi->tx_prod = entry;
6276         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6277                 netif_tx_stop_queue(txq);
6278
6279                 /* netif_tx_stop_queue() must be done before checking
6280                  * the tx index in tg3_tx_avail() below, because in
6281                  * tg3_tx(), we update tx index before checking for
6282                  * netif_tx_queue_stopped().
6283                  */
6284                 smp_mb();
6285                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6286                         netif_tx_wake_queue(txq);
6287         }
6288
6289 out_unlock:
6290         mmiowb();
6291
6292         return NETDEV_TX_OK;
6293
6294 dma_error:
6295         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6296         dev_kfree_skb(skb);
6297         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6298         return NETDEV_TX_OK;
6299 }
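
/* Editor's note: the queue stop/wake sequence near the end of
 * tg3_start_xmit() is the classic tx producer/consumer race fix.  The
 * danger: we stop the queue just as the completion path frees slots
 * and checks netif_tx_queue_stopped() before our stop is visible, so
 * nobody would ever wake the queue again.  The smp_mb() orders the
 * stop against the re-read of tg3_tx_avail(); if enough slots appeared
 * in that window, the xmit path wakes the queue itself.  Schematically
 * (hypothetical helper names):
 *
 *      producer (here):   stop_queue(); smp_mb();
 *                         if (avail() > thresh) wake_queue();
 *      consumer (tg3_tx): advance tx index; then check queue_stopped()
 *                         and avail(), waking the queue if both agree.
 */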
6300
6301 static void tg3_set_loopback(struct net_device *dev, u32 features)
6302 {
6303         struct tg3 *tp = netdev_priv(dev);
6304
6305         if (features & NETIF_F_LOOPBACK) {
6306                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6307                         return;
6308
6309                 /*
6310                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6311                  * loopback mode if Half-Duplex mode was negotiated earlier.
6312                  */
6313                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6314
6315                 /* Enable internal MAC loopback mode */
6316                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6317                 spin_lock_bh(&tp->lock);
6318                 tw32(MAC_MODE, tp->mac_mode);
6319                 netif_carrier_on(tp->dev);
6320                 spin_unlock_bh(&tp->lock);
6321                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6322         } else {
6323                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6324                         return;
6325
6326                 /* Disable internal MAC loopback mode */
6327                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6328                 spin_lock_bh(&tp->lock);
6329                 tw32(MAC_MODE, tp->mac_mode);
6330                 /* Force link status check */
6331                 tg3_setup_phy(tp, 1);
6332                 spin_unlock_bh(&tp->lock);
6333                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6334         }
6335 }
6336
6337 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6338 {
6339         struct tg3 *tp = netdev_priv(dev);
6340
6341         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6342                 features &= ~NETIF_F_ALL_TSO;
6343
6344         return features;
6345 }
6346
6347 static int tg3_set_features(struct net_device *dev, u32 features)
6348 {
6349         u32 changed = dev->features ^ features;
6350
6351         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6352                 tg3_set_loopback(dev, features);
6353
6354         return 0;
6355 }
6356
6357 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6358                                int new_mtu)
6359 {
6360         dev->mtu = new_mtu;
6361
6362         if (new_mtu > ETH_DATA_LEN) {
6363                 if (tg3_flag(tp, 5780_CLASS)) {
6364                         netdev_update_features(dev);
6365                         tg3_flag_clear(tp, TSO_CAPABLE);
6366                 } else {
6367                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6368                 }
6369         } else {
6370                 if (tg3_flag(tp, 5780_CLASS)) {
6371                         tg3_flag_set(tp, TSO_CAPABLE);
6372                         netdev_update_features(dev);
6373                 }
6374                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6375         }
6376 }
6377
6378 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6379 {
6380         struct tg3 *tp = netdev_priv(dev);
6381         int err;
6382
6383         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6384                 return -EINVAL;
6385
6386         if (!netif_running(dev)) {
6387                 /* We'll just catch it later when the
6388                  * device is brought up.
6389                  */
6390                 tg3_set_mtu(dev, tp, new_mtu);
6391                 return 0;
6392         }
6393
6394         tg3_phy_stop(tp);
6395
6396         tg3_netif_stop(tp);
6397
6398         tg3_full_lock(tp, 1);
6399
6400         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6401
6402         tg3_set_mtu(dev, tp, new_mtu);
6403
6404         err = tg3_restart_hw(tp, 0);
6405
6406         if (!err)
6407                 tg3_netif_start(tp);
6408
6409         tg3_full_unlock(tp);
6410
6411         if (!err)
6412                 tg3_phy_start(tp);
6413
6414         return err;
6415 }
6416
6417 static void tg3_rx_prodring_free(struct tg3 *tp,
6418                                  struct tg3_rx_prodring_set *tpr)
6419 {
6420         int i;
6421
6422         if (tpr != &tp->napi[0].prodring) {
6423                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6424                      i = (i + 1) & tp->rx_std_ring_mask)
6425                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6426                                         tp->rx_pkt_map_sz);
6427
6428                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6429                         for (i = tpr->rx_jmb_cons_idx;
6430                              i != tpr->rx_jmb_prod_idx;
6431                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6432                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6433                                                 TG3_RX_JMB_MAP_SZ);
6434                         }
6435                 }
6436
6437                 return;
6438         }
6439
6440         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6441                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6442                                 tp->rx_pkt_map_sz);
6443
6444         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6445                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6446                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6447                                         TG3_RX_JMB_MAP_SZ);
6448         }
6449 }
6450
6451 /* Initialize rx rings for packet processing.
6452  *
6453  * The chip has been shut down and the driver detached from
6454  * the networking stack, so no interrupts or new tx packets will
6455  * end up in the driver.  tp->{tx,}lock are held and thus
6456  * we may not sleep.
6457  */
6458 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6459                                  struct tg3_rx_prodring_set *tpr)
6460 {
6461         u32 i, rx_pkt_dma_sz;
6462
6463         tpr->rx_std_cons_idx = 0;
6464         tpr->rx_std_prod_idx = 0;
6465         tpr->rx_jmb_cons_idx = 0;
6466         tpr->rx_jmb_prod_idx = 0;
6467
6468         if (tpr != &tp->napi[0].prodring) {
6469                 memset(&tpr->rx_std_buffers[0], 0,
6470                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6471                 if (tpr->rx_jmb_buffers)
6472                         memset(&tpr->rx_jmb_buffers[0], 0,
6473                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6474                 goto done;
6475         }
6476
6477         /* Zero out all descriptors. */
6478         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6479
6480         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6481         if (tg3_flag(tp, 5780_CLASS) &&
6482             tp->dev->mtu > ETH_DATA_LEN)
6483                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6484         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6485
6486         /* Initialize invariants of the rings; we only set this
6487          * stuff once.  This works because the card does not
6488          * write into the rx buffer posting rings.
6489          */
6490         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6491                 struct tg3_rx_buffer_desc *rxd;
6492
6493                 rxd = &tpr->rx_std[i];
6494                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6495                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6496                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6497                                (i << RXD_OPAQUE_INDEX_SHIFT));
6498         }
6499
6500         /* Now allocate fresh SKBs for each rx ring. */
6501         for (i = 0; i < tp->rx_pending; i++) {
6502                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6503                         netdev_warn(tp->dev,
6504                                     "Using a smaller RX standard ring. Only "
6505                                     "%d out of %d buffers were allocated "
6506                                     "successfully\n", i, tp->rx_pending);
6507                         if (i == 0)
6508                                 goto initfail;
6509                         tp->rx_pending = i;
6510                         break;
6511                 }
6512         }
6513
6514         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6515                 goto done;
6516
6517         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6518
6519         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6520                 goto done;
6521
6522         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6523                 struct tg3_rx_buffer_desc *rxd;
6524
6525                 rxd = &tpr->rx_jmb[i].std;
6526                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6527                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6528                                   RXD_FLAG_JUMBO;
6529                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6530                        (i << RXD_OPAQUE_INDEX_SHIFT));
6531         }
6532
6533         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6534                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6535                         netdev_warn(tp->dev,
6536                                     "Using a smaller RX jumbo ring. Only %d "
6537                                     "out of %d buffers were allocated "
6538                                     "successfully\n", i, tp->rx_jumbo_pending);
6539                         if (i == 0)
6540                                 goto initfail;
6541                         tp->rx_jumbo_pending = i;
6542                         break;
6543                 }
6544         }
6545
6546 done:
6547         return 0;
6548
6549 initfail:
6550         tg3_rx_prodring_free(tp, tpr);
6551         return -ENOMEM;
6552 }
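
/* Editor's note: the 'opaque' field initialized above is a cookie the
 * chip echoes back verbatim in rx completion descriptors: the ring
 * identity (RXD_OPAQUE_RING_STD or RXD_OPAQUE_RING_JUMBO) is OR'd with
 * the buffer index shifted by RXD_OPAQUE_INDEX_SHIFT, so the completion
 * handler can recover which ring slot a returned buffer came from
 * without searching.  E.g. standard-ring slot 5 is stamped as
 *
 *      RXD_OPAQUE_RING_STD | (5 << RXD_OPAQUE_INDEX_SHIFT)
 */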
6553
6554 static void tg3_rx_prodring_fini(struct tg3 *tp,
6555                                  struct tg3_rx_prodring_set *tpr)
6556 {
6557         kfree(tpr->rx_std_buffers);
6558         tpr->rx_std_buffers = NULL;
6559         kfree(tpr->rx_jmb_buffers);
6560         tpr->rx_jmb_buffers = NULL;
6561         if (tpr->rx_std) {
6562                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6563                                   tpr->rx_std, tpr->rx_std_mapping);
6564                 tpr->rx_std = NULL;
6565         }
6566         if (tpr->rx_jmb) {
6567                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6568                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6569                 tpr->rx_jmb = NULL;
6570         }
6571 }
6572
6573 static int tg3_rx_prodring_init(struct tg3 *tp,
6574                                 struct tg3_rx_prodring_set *tpr)
6575 {
6576         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6577                                       GFP_KERNEL);
6578         if (!tpr->rx_std_buffers)
6579                 return -ENOMEM;
6580
6581         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6582                                          TG3_RX_STD_RING_BYTES(tp),
6583                                          &tpr->rx_std_mapping,
6584                                          GFP_KERNEL);
6585         if (!tpr->rx_std)
6586                 goto err_out;
6587
6588         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6589                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6590                                               GFP_KERNEL);
6591                 if (!tpr->rx_jmb_buffers)
6592                         goto err_out;
6593
6594                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6595                                                  TG3_RX_JMB_RING_BYTES(tp),
6596                                                  &tpr->rx_jmb_mapping,
6597                                                  GFP_KERNEL);
6598                 if (!tpr->rx_jmb)
6599                         goto err_out;
6600         }
6601
6602         return 0;
6603
6604 err_out:
6605         tg3_rx_prodring_fini(tp, tpr);
6606         return -ENOMEM;
6607 }
6608
6609 /* Free up pending packets in all rx/tx rings.
6610  *
6611  * The chip has been shut down and the driver detached from
6612  * the networking stack, so no interrupts or new tx packets will
6613  * end up in the driver.  tp->{tx,}lock is not held and we are not
6614  * in an interrupt context and thus may sleep.
6615  */
6616 static void tg3_free_rings(struct tg3 *tp)
6617 {
6618         int i, j;
6619
6620         for (j = 0; j < tp->irq_cnt; j++) {
6621                 struct tg3_napi *tnapi = &tp->napi[j];
6622
6623                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6624
6625                 if (!tnapi->tx_buffers)
6626                         continue;
6627
6628                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
6629                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
6630
6631                         if (!skb)
6632                                 continue;
6633
6634                         tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
6635
6636                         dev_kfree_skb_any(skb);
6637                 }
6638         }
6639 }
6640
6641 /* Initialize tx/rx rings for packet processing.
6642  *
6643  * The chip has been shut down and the driver detached from
6644  * the networking stack, so no interrupts or new tx packets will
6645  * end up in the driver.  tp->{tx,}lock are held and thus
6646  * we may not sleep.
6647  */
6648 static int tg3_init_rings(struct tg3 *tp)
6649 {
6650         int i;
6651
6652         /* Free up all the SKBs. */
6653         tg3_free_rings(tp);
6654
6655         for (i = 0; i < tp->irq_cnt; i++) {
6656                 struct tg3_napi *tnapi = &tp->napi[i];
6657
6658                 tnapi->last_tag = 0;
6659                 tnapi->last_irq_tag = 0;
6660                 tnapi->hw_status->status = 0;
6661                 tnapi->hw_status->status_tag = 0;
6662                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6663
6664                 tnapi->tx_prod = 0;
6665                 tnapi->tx_cons = 0;
6666                 if (tnapi->tx_ring)
6667                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6668
6669                 tnapi->rx_rcb_ptr = 0;
6670                 if (tnapi->rx_rcb)
6671                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6672
6673                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6674                         tg3_free_rings(tp);
6675                         return -ENOMEM;
6676                 }
6677         }
6678
6679         return 0;
6680 }
6681
6682 /*
6683  * Must not be invoked with interrupt sources disabled and
6684  * the hardware shut down.
6685  */
6686 static void tg3_free_consistent(struct tg3 *tp)
6687 {
6688         int i;
6689
6690         for (i = 0; i < tp->irq_cnt; i++) {
6691                 struct tg3_napi *tnapi = &tp->napi[i];
6692
6693                 if (tnapi->tx_ring) {
6694                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6695                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6696                         tnapi->tx_ring = NULL;
6697                 }
6698
6699                 kfree(tnapi->tx_buffers);
6700                 tnapi->tx_buffers = NULL;
6701
6702                 if (tnapi->rx_rcb) {
6703                         dma_free_coherent(&tp->pdev->dev,
6704                                           TG3_RX_RCB_RING_BYTES(tp),
6705                                           tnapi->rx_rcb,
6706                                           tnapi->rx_rcb_mapping);
6707                         tnapi->rx_rcb = NULL;
6708                 }
6709
6710                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6711
6712                 if (tnapi->hw_status) {
6713                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6714                                           tnapi->hw_status,
6715                                           tnapi->status_mapping);
6716                         tnapi->hw_status = NULL;
6717                 }
6718         }
6719
6720         if (tp->hw_stats) {
6721                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6722                                   tp->hw_stats, tp->stats_mapping);
6723                 tp->hw_stats = NULL;
6724         }
6725 }
6726
6727 /*
6728  * Must not be invoked with interrupt sources disabled and
6729  * the hardware shut down.  Can sleep.
6730  */
6731 static int tg3_alloc_consistent(struct tg3 *tp)
6732 {
6733         int i;
6734
6735         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6736                                           sizeof(struct tg3_hw_stats),
6737                                           &tp->stats_mapping,
6738                                           GFP_KERNEL);
6739         if (!tp->hw_stats)
6740                 goto err_out;
6741
6742         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6743
6744         for (i = 0; i < tp->irq_cnt; i++) {
6745                 struct tg3_napi *tnapi = &tp->napi[i];
6746                 struct tg3_hw_status *sblk;
6747
6748                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6749                                                       TG3_HW_STATUS_SIZE,
6750                                                       &tnapi->status_mapping,
6751                                                       GFP_KERNEL);
6752                 if (!tnapi->hw_status)
6753                         goto err_out;
6754
6755                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6756                 sblk = tnapi->hw_status;
6757
6758                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6759                         goto err_out;
6760
6761                 /* If multivector TSS is enabled, vector 0 does not handle
6762                  * tx interrupts.  Don't allocate any resources for it.
6763                  */
6764                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6765                     (i && tg3_flag(tp, ENABLE_TSS))) {
6766                         tnapi->tx_buffers = kzalloc(
6767                                                sizeof(struct tg3_tx_ring_info) *
6768                                                TG3_TX_RING_SIZE, GFP_KERNEL);
6769                         if (!tnapi->tx_buffers)
6770                                 goto err_out;
6771
6772                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6773                                                             TG3_TX_RING_BYTES,
6774                                                         &tnapi->tx_desc_mapping,
6775                                                             GFP_KERNEL);
6776                         if (!tnapi->tx_ring)
6777                                 goto err_out;
6778                 }
6779
6780                 /*
6781                  * When RSS is enabled, the status block format changes
6782                  * slightly.  The "rx_jumbo_consumer", "reserved",
6783                  * and "rx_mini_consumer" members get mapped to the
6784                  * other three rx return ring producer indexes.
6785                  */
6786                 switch (i) {
6787                 default:
6788                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6789                         break;
6790                 case 2:
6791                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6792                         break;
6793                 case 3:
6794                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6795                         break;
6796                 case 4:
6797                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6798                         break;
6799                 }
6800
6801                 /*
6802                  * If multivector RSS is enabled, vector 0 does not handle
6803                  * rx or tx interrupts.  Don't allocate any resources for it.
6804                  */
6805                 if (!i && tg3_flag(tp, ENABLE_RSS))
6806                         continue;
6807
6808                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6809                                                    TG3_RX_RCB_RING_BYTES(tp),
6810                                                    &tnapi->rx_rcb_mapping,
6811                                                    GFP_KERNEL);
6812                 if (!tnapi->rx_rcb)
6813                         goto err_out;
6814
6815                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6816         }
6817
6818         return 0;
6819
6820 err_out:
6821         tg3_free_consistent(tp);
6822         return -ENOMEM;
6823 }
6824
6825 #define MAX_WAIT_CNT 1000
6826
6827 /* To stop a block, clear the enable bit and poll till it
6828  * clears.  tp->lock is held.
6829  */
6830 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6831 {
6832         unsigned int i;
6833         u32 val;
6834
6835         if (tg3_flag(tp, 5705_PLUS)) {
6836                 switch (ofs) {
6837                 case RCVLSC_MODE:
6838                 case DMAC_MODE:
6839                 case MBFREE_MODE:
6840                 case BUFMGR_MODE:
6841                 case MEMARB_MODE:
6842                         /* We can't enable/disable these bits of the
6843                          * 5705/5750; just say success.
6844                          */
6845                         return 0;
6846
6847                 default:
6848                         break;
6849                 }
6850         }
6851
6852         val = tr32(ofs);
6853         val &= ~enable_bit;
6854         tw32_f(ofs, val);
6855
6856         for (i = 0; i < MAX_WAIT_CNT; i++) {
6857                 udelay(100);
6858                 val = tr32(ofs);
6859                 if ((val & enable_bit) == 0)
6860                         break;
6861         }
6862
6863         if (i == MAX_WAIT_CNT && !silent) {
6864                 dev_err(&tp->pdev->dev,
6865                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6866                         ofs, enable_bit);
6867                 return -ENODEV;
6868         }
6869
6870         return 0;
6871 }
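
/* Editor's note: tg3_stop_block() is the generic "clear the enable bit,
 * then poll until the hardware acknowledges" idiom, distilled:
 *
 *      val = read32(ofs) & ~enable_bit;
 *      write32_flush(ofs, val);                // tw32_f() above
 *      for (i = 0; i < MAX_WAIT_CNT; i++) {
 *              udelay(100);
 *              if (!(read32(ofs) & enable_bit))
 *                      return 0;               // block stopped
 *      }
 *      return -ENODEV;                         // hardware wedged
 *
 * MAX_WAIT_CNT (1000) polls of 100us give the routine a ~100ms budget.
 */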
6872
6873 /* tp->lock is held. */
6874 static int tg3_abort_hw(struct tg3 *tp, int silent)
6875 {
6876         int i, err;
6877
6878         tg3_disable_ints(tp);
6879
6880         tp->rx_mode &= ~RX_MODE_ENABLE;
6881         tw32_f(MAC_RX_MODE, tp->rx_mode);
6882         udelay(10);
6883
6884         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6885         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6886         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6887         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6888         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6889         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6890
6891         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6892         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6893         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6894         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6895         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6896         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6897         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6898
6899         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6900         tw32_f(MAC_MODE, tp->mac_mode);
6901         udelay(40);
6902
6903         tp->tx_mode &= ~TX_MODE_ENABLE;
6904         tw32_f(MAC_TX_MODE, tp->tx_mode);
6905
6906         for (i = 0; i < MAX_WAIT_CNT; i++) {
6907                 udelay(100);
6908                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6909                         break;
6910         }
6911         if (i >= MAX_WAIT_CNT) {
6912                 dev_err(&tp->pdev->dev,
6913                         "%s timed out, TX_MODE_ENABLE will not clear "
6914                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6915                 err |= -ENODEV;
6916         }
6917
6918         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6919         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6920         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6921
6922         tw32(FTQ_RESET, 0xffffffff);
6923         tw32(FTQ_RESET, 0x00000000);
6924
6925         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6926         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6927
6928         for (i = 0; i < tp->irq_cnt; i++) {
6929                 struct tg3_napi *tnapi = &tp->napi[i];
6930                 if (tnapi->hw_status)
6931                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6932         }
6933         if (tp->hw_stats)
6934                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6935
6936         return err;
6937 }
6938
6939 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6940 {
6941         int i;
6942         u32 apedata;
6943
6944         /* NCSI does not support APE events */
6945         if (tg3_flag(tp, APE_HAS_NCSI))
6946                 return;
6947
6948         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6949         if (apedata != APE_SEG_SIG_MAGIC)
6950                 return;
6951
6952         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6953         if (!(apedata & APE_FW_STATUS_READY))
6954                 return;
6955
6956         /* Wait up to 1 millisecond for the APE to service the previous event. */
6957         for (i = 0; i < 10; i++) {
6958                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6959                         return;
6960
6961                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6962
6963                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6964                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6965                                         event | APE_EVENT_STATUS_EVENT_PENDING);
6966
6967                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6968
6969                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6970                         break;
6971
6972                 udelay(100);
6973         }
6974
6975         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6976                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6977 }
6978
6979 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6980 {
6981         u32 event;
6982         u32 apedata;
6983
6984         if (!tg3_flag(tp, ENABLE_APE))
6985                 return;
6986
6987         switch (kind) {
6988         case RESET_KIND_INIT:
6989                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6990                                 APE_HOST_SEG_SIG_MAGIC);
6991                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6992                                 APE_HOST_SEG_LEN_MAGIC);
6993                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6994                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6995                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6996                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6997                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6998                                 APE_HOST_BEHAV_NO_PHYLOCK);
6999                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
7000                                     TG3_APE_HOST_DRVR_STATE_START);
7001
7002                 event = APE_EVENT_STATUS_STATE_START;
7003                 break;
7004         case RESET_KIND_SHUTDOWN:
7005                 /* With the interface we are currently using,
7006                  * APE does not track driver state.  Wiping
7007                  * out the HOST SEGMENT SIGNATURE forces
7008                  * the APE to assume OS-absent status.
7009                  */
7010                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
7011
7012                 if (device_may_wakeup(&tp->pdev->dev) &&
7013                     tg3_flag(tp, WOL_ENABLE)) {
7014                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7015                                             TG3_APE_HOST_WOL_SPEED_AUTO);
7016                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7017                 } else
7018                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7019
7020                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7021
7022                 event = APE_EVENT_STATUS_STATE_UNLOAD;
7023                 break;
7024         case RESET_KIND_SUSPEND:
7025                 event = APE_EVENT_STATUS_STATE_SUSPEND;
7026                 break;
7027         default:
7028                 return;
7029         }
7030
7031         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7032
7033         tg3_ape_send_event(tp, event);
7034 }
7035
7036 /* tp->lock is held. */
7037 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7038 {
7039         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7040                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7041
7042         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7043                 switch (kind) {
7044                 case RESET_KIND_INIT:
7045                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7046                                       DRV_STATE_START);
7047                         break;
7048
7049                 case RESET_KIND_SHUTDOWN:
7050                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7051                                       DRV_STATE_UNLOAD);
7052                         break;
7053
7054                 case RESET_KIND_SUSPEND:
7055                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7056                                       DRV_STATE_SUSPEND);
7057                         break;
7058
7059                 default:
7060                         break;
7061                 }
7062         }
7063
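        /* The APE learns about INIT and SUSPEND here, before the reset;
         * SHUTDOWN is reported only after the reset completes, from
         * tg3_write_sig_post_reset().
         */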
7064         if (kind == RESET_KIND_INIT ||
7065             kind == RESET_KIND_SUSPEND)
7066                 tg3_ape_driver_state_change(tp, kind);
7067 }
7068
7069 /* tp->lock is held. */
7070 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7071 {
7072         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7073                 switch (kind) {
7074                 case RESET_KIND_INIT:
7075                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7076                                       DRV_STATE_START_DONE);
7077                         break;
7078
7079                 case RESET_KIND_SHUTDOWN:
7080                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7081                                       DRV_STATE_UNLOAD_DONE);
7082                         break;
7083
7084                 default:
7085                         break;
7086                 }
7087         }
7088
7089         if (kind == RESET_KIND_SHUTDOWN)
7090                 tg3_ape_driver_state_change(tp, kind);
7091 }
7092
7093 /* tp->lock is held. */
7094 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7095 {
7096         if (tg3_flag(tp, ENABLE_ASF)) {
7097                 switch (kind) {
7098                 case RESET_KIND_INIT:
7099                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7100                                       DRV_STATE_START);
7101                         break;
7102
7103                 case RESET_KIND_SHUTDOWN:
7104                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7105                                       DRV_STATE_UNLOAD);
7106                         break;
7107
7108                 case RESET_KIND_SUSPEND:
7109                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7110                                       DRV_STATE_SUSPEND);
7111                         break;
7112
7113                 default:
7114                         break;
7115                 }
7116         }
7117 }
7118
7119 static int tg3_poll_fw(struct tg3 *tp)
7120 {
7121         int i;
7122         u32 val;
7123
7124         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7125                 /* Wait up to 20ms for init done. */
7126                 for (i = 0; i < 200; i++) {
7127                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7128                                 return 0;
7129                         udelay(100);
7130                 }
7131                 return -ENODEV;
7132         }
7133
7134         /* Wait for firmware initialization to complete. */
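        /* (Up to ~1 second: 100,000 polls of 10 us each.) */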
7135         for (i = 0; i < 100000; i++) {
7136                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7137                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7138                         break;
7139                 udelay(10);
7140         }
7141
7142         /* Chip might not be fitted with firmware.  Some Sun onboard
7143          * parts are configured like that.  So don't signal the timeout
7144          * of the above loop as an error, but do report the lack of
7145          * running firmware once.
7146          */
7147         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7148                 tg3_flag_set(tp, NO_FWARE_REPORTED);
7149
7150                 netdev_info(tp->dev, "No firmware running\n");
7151         }
7152
7153         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7154                 /* The 57765 A0 needs a little more
7155                  * time to do some important work.
7156                  */
7157                 mdelay(10);
7158         }
7159
7160         return 0;
7161 }
7162
7163 /* Save PCI command register before chip reset */
7164 static void tg3_save_pci_state(struct tg3 *tp)
7165 {
7166         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7167 }
7168
7169 /* Restore PCI state after chip reset */
7170 static void tg3_restore_pci_state(struct tg3 *tp)
7171 {
7172         u32 val;
7173
7174         /* Re-enable indirect register accesses. */
7175         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7176                                tp->misc_host_ctrl);
7177
7178         /* Set MAX PCI retry to zero. */
7179         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7180         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7181             tg3_flag(tp, PCIX_MODE))
7182                 val |= PCISTATE_RETRY_SAME_DMA;
7183         /* Allow reads and writes to the APE register and memory space. */
7184         if (tg3_flag(tp, ENABLE_APE))
7185                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7186                        PCISTATE_ALLOW_APE_SHMEM_WR |
7187                        PCISTATE_ALLOW_APE_PSPACE_WR;
7188         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7189
7190         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7191
7192         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7193                 if (tg3_flag(tp, PCI_EXPRESS))
7194                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7195                 else {
7196                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7197                                               tp->pci_cacheline_sz);
7198                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7199                                               tp->pci_lat_timer);
7200                 }
7201         }
7202
7203         /* Make sure PCI-X relaxed ordering bit is clear. */
7204         if (tg3_flag(tp, PCIX_MODE)) {
7205                 u16 pcix_cmd;
7206
7207                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7208                                      &pcix_cmd);
7209                 pcix_cmd &= ~PCI_X_CMD_ERO;
7210                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7211                                       pcix_cmd);
7212         }
7213
7214         if (tg3_flag(tp, 5780_CLASS)) {
7215
7216                 /* Chip reset on 5780 will reset the MSI enable bit,
7217                  * so we need to restore it.
7218                  */
7219                 if (tg3_flag(tp, USING_MSI)) {
7220                         u16 ctrl;
7221
7222                         pci_read_config_word(tp->pdev,
7223                                              tp->msi_cap + PCI_MSI_FLAGS,
7224                                              &ctrl);
7225                         pci_write_config_word(tp->pdev,
7226                                               tp->msi_cap + PCI_MSI_FLAGS,
7227                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7228                         val = tr32(MSGINT_MODE);
7229                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7230                 }
7231         }
7232 }
7233
7234 static void tg3_stop_fw(struct tg3 *);
7235
7236 /* tp->lock is held. */
7237 static int tg3_chip_reset(struct tg3 *tp)
7238 {
7239         u32 val;
7240         void (*write_op)(struct tg3 *, u32, u32);
7241         int i, err;
7242
7243         tg3_nvram_lock(tp);
7244
7245         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7246
7247         /* No matching tg3_nvram_unlock() after this because
7248          * chip reset below will undo the nvram lock.
7249          */
7250         tp->nvram_lock_cnt = 0;
7251
7252         /* GRC_MISC_CFG core clock reset will clear the memory
7253          * enable bit in PCI register 4 and the MSI enable bit
7254          * on some chips, so we save relevant registers here.
7255          */
7256         tg3_save_pci_state(tp);
7257
7258         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7259             tg3_flag(tp, 5755_PLUS))
7260                 tw32(GRC_FASTBOOT_PC, 0);
7261
7262         /*
7263          * We must avoid the readl() that normally takes place.
7264          * It locks machines, causes machine checks, and other
7265          * fun things.  So, temporarily disable the 5701
7266          * hardware workaround, while we do the reset.
7267          */
7268         write_op = tp->write32;
7269         if (write_op == tg3_write_flush_reg32)
7270                 tp->write32 = tg3_write32;
7271
7272         /* Prevent the irq handler from reading or writing PCI registers
7273          * during chip reset when the memory enable bit in the PCI command
7274          * register may be cleared.  The chip does not generate interrupts
7275          * at this time, but the irq handler may still be called due to irq
7276          * sharing or irqpoll.
7277          */
7278         tg3_flag_set(tp, CHIP_RESETTING);
7279         for (i = 0; i < tp->irq_cnt; i++) {
7280                 struct tg3_napi *tnapi = &tp->napi[i];
7281                 if (tnapi->hw_status) {
7282                         tnapi->hw_status->status = 0;
7283                         tnapi->hw_status->status_tag = 0;
7284                 }
7285                 tnapi->last_tag = 0;
7286                 tnapi->last_irq_tag = 0;
7287         }
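        /* Make sure the zeroed status blocks and cleared tags above are
         * visible to a concurrently running irq handler before we wait
         * for the handlers below.
         */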
7288         smp_mb();
7289
7290         for (i = 0; i < tp->irq_cnt; i++)
7291                 synchronize_irq(tp->napi[i].irq_vec);
7292
7293         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7294                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7295                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7296         }
7297
7298         /* do the reset */
7299         val = GRC_MISC_CFG_CORECLK_RESET;
7300
7301         if (tg3_flag(tp, PCI_EXPRESS)) {
7302                 /* Force PCIe 1.0a mode */
7303                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7304                     !tg3_flag(tp, 57765_PLUS) &&
7305                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7306                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7307                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7308
7309                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7310                         tw32(GRC_MISC_CFG, (1 << 29));
7311                         val |= (1 << 29);
7312                 }
7313         }
7314
7315         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7316                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7317                 tw32(GRC_VCPU_EXT_CTRL,
7318                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7319         }
7320
7321         /* Manage gphy power for all CPMU-absent PCIe devices. */
7322         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7323                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7324
7325         tw32(GRC_MISC_CFG, val);
7326
7327         /* restore 5701 hardware bug workaround write method */
7328         tp->write32 = write_op;
7329
7330         /* Unfortunately, we have to delay before the PCI read back.
7331          * Some 575X chips will not even respond to a PCI cfg access
7332          * when the reset command is given to the chip.
7333          *
7334          * How do these hardware designers expect things to work
7335          * properly if the PCI write is posted for a long period
7336          * of time?  It is always necessary to have some method by
7337          * which a register read back can occur to push the write
7338          * out which does the reset.
7339          *
7340          * For most tg3 variants the trick below works.
7341          * Ho hum...
7342          */
7343         udelay(120);
7344
7345         /* Flush PCI posted writes.  The normal MMIO registers
7346          * are inaccessible at this time so this is the only
7347          * way to do this reliably (actually, this is no longer
7348          * the case, see above).  I tried to use indirect
7349          * register read/write but this upset some 5701 variants.
7350          */
7351         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7352
7353         udelay(120);
7354
7355         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7356                 u16 val16;
7357
7358                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7359                         int i;
7360                         u32 cfg_val;
7361
7362                         /* Wait for link training to complete.  */
7363                         for (i = 0; i < 5000; i++)
7364                                 udelay(100);
7365
7366                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7367                         pci_write_config_dword(tp->pdev, 0xc4,
7368                                                cfg_val | (1 << 15));
7369                 }
7370
7371                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7372                 pci_read_config_word(tp->pdev,
7373                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7374                                      &val16);
7375                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7376                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7377                 /*
7378                  * Older PCIe devices only support the 128 byte
7379                  * MPS setting.  Enforce the restriction.
7380                  */
7381                 if (!tg3_flag(tp, CPMU_PRESENT))
7382                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7383                 pci_write_config_word(tp->pdev,
7384                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7385                                       val16);
7386
7387                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7388
7389                 /* Clear error status */
7390                 pci_write_config_word(tp->pdev,
7391                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7392                                       PCI_EXP_DEVSTA_CED |
7393                                       PCI_EXP_DEVSTA_NFED |
7394                                       PCI_EXP_DEVSTA_FED |
7395                                       PCI_EXP_DEVSTA_URD);
7396         }
7397
7398         tg3_restore_pci_state(tp);
7399
7400         tg3_flag_clear(tp, CHIP_RESETTING);
7401         tg3_flag_clear(tp, ERROR_PROCESSED);
7402
7403         val = 0;
7404         if (tg3_flag(tp, 5780_CLASS))
7405                 val = tr32(MEMARB_MODE);
7406         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7407
7408         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7409                 tg3_stop_fw(tp);
7410                 tw32(0x5000, 0x400);
7411         }
7412
7413         tw32(GRC_MODE, tp->grc_mode);
7414
7415         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7416                 val = tr32(0xc4);
7417
7418                 tw32(0xc4, val | (1 << 15));
7419         }
7420
7421         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7422             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7423                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7424                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7425                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7426                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7427         }
7428
7429         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7430                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7431                 val = tp->mac_mode;
7432         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7433                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7434                 val = tp->mac_mode;
7435         } else
7436                 val = 0;
7437
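        /* tw32_f() flushes the posted write with a read back, so the
         * 40 us settling delay below starts after the write has landed.
         */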
7438         tw32_f(MAC_MODE, val);
7439         udelay(40);
7440
7441         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7442
7443         err = tg3_poll_fw(tp);
7444         if (err)
7445                 return err;
7446
7447         tg3_mdio_start(tp);
7448
7449         if (tg3_flag(tp, PCI_EXPRESS) &&
7450             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7451             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7452             !tg3_flag(tp, 57765_PLUS)) {
7453                 val = tr32(0x7c00);
7454
7455                 tw32(0x7c00, val | (1 << 25));
7456         }
7457
7458         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7459                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7460                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7461         }
7462
7463         /* Reprobe ASF enable state.  */
7464         tg3_flag_clear(tp, ENABLE_ASF);
7465         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7466         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7467         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7468                 u32 nic_cfg;
7469
7470                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7471                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7472                         tg3_flag_set(tp, ENABLE_ASF);
7473                         tp->last_event_jiffies = jiffies;
7474                         if (tg3_flag(tp, 5750_PLUS))
7475                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7476                 }
7477         }
7478
7479         return 0;
7480 }
7481
7482 /* tp->lock is held. */
7483 static void tg3_stop_fw(struct tg3 *tp)
7484 {
7485         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7486                 /* Wait for RX cpu to ACK the previous event. */
7487                 tg3_wait_for_event_ack(tp);
7488
7489                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7490
7491                 tg3_generate_fw_event(tp);
7492
7493                 /* Wait for RX cpu to ACK this event. */
7494                 tg3_wait_for_event_ack(tp);
7495         }
7496 }
7497
7498 /* tp->lock is held. */
7499 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7500 {
7501         int err;
7502
7503         tg3_stop_fw(tp);
7504
7505         tg3_write_sig_pre_reset(tp, kind);
7506
7507         tg3_abort_hw(tp, silent);
7508         err = tg3_chip_reset(tp);
7509
7510         __tg3_set_mac_addr(tp, 0);
7511
7512         tg3_write_sig_legacy(tp, kind);
7513         tg3_write_sig_post_reset(tp, kind);
7514
7515         if (err)
7516                 return err;
7517
7518         return 0;
7519 }
7520
7521 #define RX_CPU_SCRATCH_BASE     0x30000
7522 #define RX_CPU_SCRATCH_SIZE     0x04000
7523 #define TX_CPU_SCRATCH_BASE     0x34000
7524 #define TX_CPU_SCRATCH_SIZE     0x04000
7525
7526 /* tp->lock is held. */
7527 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7528 {
7529         int i;
7530
7531         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7532
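        /* The 5906 has no conventional RX/TX CPUs; its VCPU is halted
         * through GRC_VCPU_EXT_CTRL rather than the CPU_MODE register.
         */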
7533         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7534                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7535
7536                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7537                 return 0;
7538         }
7539         if (offset == RX_CPU_BASE) {
7540                 for (i = 0; i < 10000; i++) {
7541                         tw32(offset + CPU_STATE, 0xffffffff);
7542                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7543                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7544                                 break;
7545                 }
7546
7547                 tw32(offset + CPU_STATE, 0xffffffff);
7548                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7549                 udelay(10);
7550         } else {
7551                 for (i = 0; i < 10000; i++) {
7552                         tw32(offset + CPU_STATE, 0xffffffff);
7553                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7554                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7555                                 break;
7556                 }
7557         }
7558
7559         if (i >= 10000) {
7560                 netdev_err(tp->dev, "%s: timed out halting the %s CPU\n",
7561                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7562                 return -ENODEV;
7563         }
7564
7565         /* Clear firmware's nvram arbitration. */
7566         if (tg3_flag(tp, NVRAM))
7567                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7568         return 0;
7569 }
7570
7571 struct fw_info {
7572         unsigned int fw_base;
7573         unsigned int fw_len;
7574         const __be32 *fw_data;
7575 };
7576
7577 /* tp->lock is held. */
7578 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7579                                  int cpu_scratch_size, struct fw_info *info)
7580 {
7581         int err, lock_err, i;
7582         void (*write_op)(struct tg3 *, u32, u32);
7583
7584         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7585                 netdev_err(tp->dev,
7586                            "%s: Trying to load TX cpu firmware on a 5705 or later chip, which has no TX cpu\n",
7587                            __func__);
7588                 return -EINVAL;
7589         }
7590
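        /* 5705 and later chips take the image through tg3_write_mem();
         * older chips are written via indirect register accesses.
         */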
7591         if (tg3_flag(tp, 5705_PLUS))
7592                 write_op = tg3_write_mem;
7593         else
7594                 write_op = tg3_write_indirect_reg32;
7595
7596         /* It is possible that bootcode is still loading at this point.
7597          * Acquire the nvram lock before halting the cpu.
7598          */
7599         lock_err = tg3_nvram_lock(tp);
7600         err = tg3_halt_cpu(tp, cpu_base);
7601         if (!lock_err)
7602                 tg3_nvram_unlock(tp);
7603         if (err)
7604                 goto out;
7605
7606         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7607                 write_op(tp, cpu_scratch_base + i, 0);
7608         tw32(cpu_base + CPU_STATE, 0xffffffff);
7609         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7610         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7611                 write_op(tp, (cpu_scratch_base +
7612                               (info->fw_base & 0xffff) +
7613                               (i * sizeof(u32))),
7614                               be32_to_cpu(info->fw_data[i]));
7615
7616         err = 0;
7617
7618 out:
7619         return err;
7620 }
7621
7622 /* tp->lock is held. */
7623 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7624 {
7625         struct fw_info info;
7626         const __be32 *fw_data;
7627         int err, i;
7628
7629         fw_data = (void *)tp->fw->data;
7630
7631         /* Firmware blob starts with version numbers, followed by
7632          * start address and length.  We are setting the complete length:
7633          * length = end_address_of_bss - start_address_of_text.
7634          * The remainder is the blob, to be loaded contiguously
7635          * from the start address. */
7636
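        /* So, as parsed below: fw_data[0] holds the version, fw_data[1]
         * the start address, fw_data[2] the length, and fw_data[3] onward
         * the image itself (a 12-byte header, hence "size - 12").
         */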
7637         info.fw_base = be32_to_cpu(fw_data[1]);
7638         info.fw_len = tp->fw->size - 12;
7639         info.fw_data = &fw_data[3];
7640
7641         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7642                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7643                                     &info);
7644         if (err)
7645                 return err;
7646
7647         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7648                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7649                                     &info);
7650         if (err)
7651                 return err;
7652
7653         /* Now startup only the RX cpu. */
7654         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7655         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7656
7657         for (i = 0; i < 5; i++) {
7658                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7659                         break;
7660                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7661                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7662                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7663                 udelay(1000);
7664         }
7665         if (i >= 5) {
7666                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
7667                            "should be %08x\n", __func__,
7668                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7669                 return -ENODEV;
7670         }
7671         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7672         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7673
7674         return 0;
7675 }
7676
7677 /* tp->lock is held. */
7678 static int tg3_load_tso_firmware(struct tg3 *tp)
7679 {
7680         struct fw_info info;
7681         const __be32 *fw_data;
7682         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7683         int err, i;
7684
7685         if (tg3_flag(tp, HW_TSO_1) ||
7686             tg3_flag(tp, HW_TSO_2) ||
7687             tg3_flag(tp, HW_TSO_3))
7688                 return 0;
7689
7690         fw_data = (void *)tp->fw->data;
7691
7692         /* Firmware blob starts with version numbers, followed by
7693          * start address and length.  We are setting the complete length:
7694          * length = end_address_of_bss - start_address_of_text.
7695          * The remainder is the blob, to be loaded contiguously
7696          * from the start address. */
7697
7698         info.fw_base = be32_to_cpu(fw_data[1]);
7699         cpu_scratch_size = tp->fw_len;
7700         info.fw_len = tp->fw->size - 12;
7701         info.fw_data = &fw_data[3];
7702
7703         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7704                 cpu_base = RX_CPU_BASE;
7705                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7706         } else {
7707                 cpu_base = TX_CPU_BASE;
7708                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7709                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7710         }
7711
7712         err = tg3_load_firmware_cpu(tp, cpu_base,
7713                                     cpu_scratch_base, cpu_scratch_size,
7714                                     &info);
7715         if (err)
7716                 return err;
7717
7718         /* Now startup the cpu. */
7719         tw32(cpu_base + CPU_STATE, 0xffffffff);
7720         tw32_f(cpu_base + CPU_PC, info.fw_base);
7721
7722         for (i = 0; i < 5; i++) {
7723                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7724                         break;
7725                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7726                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7727                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7728                 udelay(1000);
7729         }
7730         if (i >= 5) {
7731                 netdev_err(tp->dev,
7732                            "%s failed to set CPU PC: is %08x, should be %08x\n",
7733                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7734                 return -ENODEV;
7735         }
7736         tw32(cpu_base + CPU_STATE, 0xffffffff);
7737         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7738         return 0;
7739 }
7740
7741
7742 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7743 {
7744         struct tg3 *tp = netdev_priv(dev);
7745         struct sockaddr *addr = p;
7746         int err = 0, skip_mac_1 = 0;
7747
7748         if (!is_valid_ether_addr(addr->sa_data))
7749                 return -EINVAL;
7750
7751         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7752
7753         if (!netif_running(dev))
7754                 return 0;
7755
7756         if (tg3_flag(tp, ENABLE_ASF)) {
7757                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7758
7759                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7760                 addr0_low = tr32(MAC_ADDR_0_LOW);
7761                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7762                 addr1_low = tr32(MAC_ADDR_1_LOW);
7763
7764                 /* Skip MAC addr 1 if ASF is using it. */
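                /* (i.e. address 1 is non-zero and differs from address 0.) */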
7765                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7766                     !(addr1_high == 0 && addr1_low == 0))
7767                         skip_mac_1 = 1;
7768         }
7769         spin_lock_bh(&tp->lock);
7770         __tg3_set_mac_addr(tp, skip_mac_1);
7771         spin_unlock_bh(&tp->lock);
7772
7773         return err;
7774 }
7775
7776 /* tp->lock is held. */
7777 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7778                            dma_addr_t mapping, u32 maxlen_flags,
7779                            u32 nic_addr)
7780 {
7781         tg3_write_mem(tp,
7782                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7783                       ((u64) mapping >> 32));
7784         tg3_write_mem(tp,
7785                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7786                       ((u64) mapping & 0xffffffff));
7787         tg3_write_mem(tp,
7788                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7789                        maxlen_flags);
7790
7791         if (!tg3_flag(tp, 5705_PLUS))
7792                 tg3_write_mem(tp,
7793                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7794                               nic_addr);
7795 }
7796
7797 static void __tg3_set_rx_mode(struct net_device *);
7798 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7799 {
7800         int i;
7801
7802         if (!tg3_flag(tp, ENABLE_TSS)) {
7803                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7804                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7805                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7806         } else {
7807                 tw32(HOSTCC_TXCOL_TICKS, 0);
7808                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7809                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7810         }
7811
7812         if (!tg3_flag(tp, ENABLE_RSS)) {
7813                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7814                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7815                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7816         } else {
7817                 tw32(HOSTCC_RXCOL_TICKS, 0);
7818                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7819                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7820         }
7821
7822         if (!tg3_flag(tp, 5705_PLUS)) {
7823                 u32 val = ec->stats_block_coalesce_usecs;
7824
7825                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7826                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7827
7828                 if (!netif_carrier_ok(tp->dev))
7829                         val = 0;
7830
7831                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7832         }
7833
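        /* Vectors 1 and up each have their own coalescing registers,
         * spaced 0x18 bytes apart starting at the _VEC1 offsets.
         */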
7834         for (i = 0; i < tp->irq_cnt - 1; i++) {
7835                 u32 reg;
7836
7837                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7838                 tw32(reg, ec->rx_coalesce_usecs);
7839                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7840                 tw32(reg, ec->rx_max_coalesced_frames);
7841                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7842                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7843
7844                 if (tg3_flag(tp, ENABLE_TSS)) {
7845                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7846                         tw32(reg, ec->tx_coalesce_usecs);
7847                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7848                         tw32(reg, ec->tx_max_coalesced_frames);
7849                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7850                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7851                 }
7852         }
7853
7854         for (; i < tp->irq_max - 1; i++) {
7855                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7856                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7857                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7858
7859                 if (tg3_flag(tp, ENABLE_TSS)) {
7860                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7861                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7862                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7863                 }
7864         }
7865 }
7866
7867 /* tp->lock is held. */
7868 static void tg3_rings_reset(struct tg3 *tp)
7869 {
7870         int i;
7871         u32 stblk, txrcb, rxrcb, limit;
7872         struct tg3_napi *tnapi = &tp->napi[0];
7873
7874         /* Disable all transmit rings but the first. */
7875         if (!tg3_flag(tp, 5705_PLUS))
7876                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7877         else if (tg3_flag(tp, 5717_PLUS))
7878                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7879         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7880                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7881         else
7882                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7883
7884         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7885              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7886                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7887                               BDINFO_FLAGS_DISABLED);
7888
7889
7890         /* Disable all receive return rings but the first. */
7891         if (tg3_flag(tp, 5717_PLUS))
7892                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7893         else if (!tg3_flag(tp, 5705_PLUS))
7894                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7895         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7896                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7897                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7898         else
7899                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7900
7901         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7902              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7903                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7904                               BDINFO_FLAGS_DISABLED);
7905
7906         /* Disable interrupts */
7907         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7908         tp->napi[0].chk_msi_cnt = 0;
7909         tp->napi[0].last_rx_cons = 0;
7910         tp->napi[0].last_tx_cons = 0;
7911
7912         /* Zero mailbox registers. */
7913         if (tg3_flag(tp, SUPPORT_MSIX)) {
7914                 for (i = 1; i < tp->irq_max; i++) {
7915                         tp->napi[i].tx_prod = 0;
7916                         tp->napi[i].tx_cons = 0;
7917                         if (tg3_flag(tp, ENABLE_TSS))
7918                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7919                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7920                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7921                         tp->napi[i].chk_msi_cnt = 0;
7922                         tp->napi[i].last_rx_cons = 0;
7923                         tp->napi[i].last_tx_cons = 0;
7924                 }
7925                 if (!tg3_flag(tp, ENABLE_TSS))
7926                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7927         } else {
7928                 tp->napi[0].tx_prod = 0;
7929                 tp->napi[0].tx_cons = 0;
7930                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7931                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7932         }
7933
7934         /* Make sure the NIC-based send BD rings are disabled. */
7935         if (!tg3_flag(tp, 5705_PLUS)) {
7936                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7937                 for (i = 0; i < 16; i++)
7938                         tw32_tx_mbox(mbox + i * 8, 0);
7939         }
7940
7941         txrcb = NIC_SRAM_SEND_RCB;
7942         rxrcb = NIC_SRAM_RCV_RET_RCB;
7943
7944         /* Clear status block in ram. */
7945         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7946
7947         /* Set status block DMA address */
7948         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7949              ((u64) tnapi->status_mapping >> 32));
7950         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7951              ((u64) tnapi->status_mapping & 0xffffffff));
7952
7953         if (tnapi->tx_ring) {
7954                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7955                                (TG3_TX_RING_SIZE <<
7956                                 BDINFO_FLAGS_MAXLEN_SHIFT),
7957                                NIC_SRAM_TX_BUFFER_DESC);
7958                 txrcb += TG3_BDINFO_SIZE;
7959         }
7960
7961         if (tnapi->rx_rcb) {
7962                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7963                                (tp->rx_ret_ring_mask + 1) <<
7964                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7965                 rxrcb += TG3_BDINFO_SIZE;
7966         }
7967
7968         stblk = HOSTCC_STATBLCK_RING1;
7969
7970         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7971                 u64 mapping = (u64)tnapi->status_mapping;
7972                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7973                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7974
7975                 /* Clear status block in ram. */
7976                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7977
7978                 if (tnapi->tx_ring) {
7979                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7980                                        (TG3_TX_RING_SIZE <<
7981                                         BDINFO_FLAGS_MAXLEN_SHIFT),
7982                                        NIC_SRAM_TX_BUFFER_DESC);
7983                         txrcb += TG3_BDINFO_SIZE;
7984                 }
7985
7986                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7987                                ((tp->rx_ret_ring_mask + 1) <<
7988                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7989
7990                 stblk += 8;
7991                 rxrcb += TG3_BDINFO_SIZE;
7992         }
7993 }
7994
7995 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7996 {
7997         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7998
7999         if (!tg3_flag(tp, 5750_PLUS) ||
8000             tg3_flag(tp, 5780_CLASS) ||
8001             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8002             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8003                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8004         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8005                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8006                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8007         else
8008                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8009
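        /* Replenish thresholds: the NIC-side value is capped at half the
         * on-chip BD cache, the host-side value at 1/8 of the posted ring
         * (but at least 1); the lower of the two is programmed below.
         */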
8010         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8011         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8012
8013         val = min(nic_rep_thresh, host_rep_thresh);
8014         tw32(RCVBDI_STD_THRESH, val);
8015
8016         if (tg3_flag(tp, 57765_PLUS))
8017                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8018
8019         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8020                 return;
8021
8022         if (!tg3_flag(tp, 5705_PLUS))
8023                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8024         else
8025                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8026
8027         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8028
8029         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8030         tw32(RCVBDI_JUMBO_THRESH, val);
8031
8032         if (tg3_flag(tp, 57765_PLUS))
8033                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8034 }
8035
8036 /* tp->lock is held. */
8037 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8038 {
8039         u32 val, rdmac_mode;
8040         int i, err, limit;
8041         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8042
8043         tg3_disable_ints(tp);
8044
8045         tg3_stop_fw(tp);
8046
8047         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8048
8049         if (tg3_flag(tp, INIT_COMPLETE))
8050                 tg3_abort_hw(tp, 1);
8051
8052         /* Enable MAC control of LPI (EEE Low Power Idle) */
8053         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8054                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8055                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8056                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8057
8058                 tw32_f(TG3_CPMU_EEE_CTRL,
8059                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8060
8061                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8062                       TG3_CPMU_EEEMD_LPI_IN_TX |
8063                       TG3_CPMU_EEEMD_LPI_IN_RX |
8064                       TG3_CPMU_EEEMD_EEE_ENABLE;
8065
8066                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8067                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8068
8069                 if (tg3_flag(tp, ENABLE_APE))
8070                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8071
8072                 tw32_f(TG3_CPMU_EEE_MODE, val);
8073
8074                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8075                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8076                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8077
8078                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8079                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8080                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8081         }
8082
8083         if (reset_phy)
8084                 tg3_phy_reset(tp);
8085
8086         err = tg3_chip_reset(tp);
8087         if (err)
8088                 return err;
8089
8090         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8091
8092         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8093                 val = tr32(TG3_CPMU_CTRL);
8094                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8095                 tw32(TG3_CPMU_CTRL, val);
8096
8097                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8098                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8099                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8100                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8101
8102                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8103                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8104                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8105                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8106
8107                 val = tr32(TG3_CPMU_HST_ACC);
8108                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8109                 val |= CPMU_HST_ACC_MACCLK_6_25;
8110                 tw32(TG3_CPMU_HST_ACC, val);
8111         }
8112
8113         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8114                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8115                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8116                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8117                 tw32(PCIE_PWR_MGMT_THRESH, val);
8118
8119                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8120                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8121
8122                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8123
8124                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8125                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8126         }
8127
8128         if (tg3_flag(tp, L1PLLPD_EN)) {
8129                 u32 grc_mode = tr32(GRC_MODE);
8130
8131                 /* Access the lower 1K of PL PCIE block registers. */
8132                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8133                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8134
8135                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8136                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8137                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8138
8139                 tw32(GRC_MODE, grc_mode);
8140         }
8141
8142         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8143                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8144                         u32 grc_mode = tr32(GRC_MODE);
8145
8146                         /* Access the lower 1K of PL PCIE block registers. */
8147                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8148                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8149
8150                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8151                                    TG3_PCIE_PL_LO_PHYCTL5);
8152                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8153                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8154
8155                         tw32(GRC_MODE, grc_mode);
8156                 }
8157
8158                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8159                         u32 grc_mode = tr32(GRC_MODE);
8160
8161                         /* Access the lower 1K of DL PCIE block registers. */
8162                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8163                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8164
8165                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8166                                    TG3_PCIE_DL_LO_FTSMAX);
8167                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8168                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8169                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8170
8171                         tw32(GRC_MODE, grc_mode);
8172                 }
8173
8174                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8175                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8176                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8177                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8178         }
8179
8180         /* This works around an issue with Athlon chipsets on
8181          * B3 tigon3 silicon.  This bit has no effect on any
8182          * other revision.  But do not set this on PCI Express
8183          * chips and don't even touch the clocks if the CPMU is present.
8184          */
8185         if (!tg3_flag(tp, CPMU_PRESENT)) {
8186                 if (!tg3_flag(tp, PCI_EXPRESS))
8187                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8188                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8189         }
8190
8191         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8192             tg3_flag(tp, PCIX_MODE)) {
8193                 val = tr32(TG3PCI_PCISTATE);
8194                 val |= PCISTATE_RETRY_SAME_DMA;
8195                 tw32(TG3PCI_PCISTATE, val);
8196         }
8197
8198         if (tg3_flag(tp, ENABLE_APE)) {
8199                 /* Allow reads and writes to the
8200                  * APE register and memory space.
8201                  */
8202                 val = tr32(TG3PCI_PCISTATE);
8203                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8204                        PCISTATE_ALLOW_APE_SHMEM_WR |
8205                        PCISTATE_ALLOW_APE_PSPACE_WR;
8206                 tw32(TG3PCI_PCISTATE, val);
8207         }
8208
8209         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8210                 /* Enable some hw fixes.  */
8211                 val = tr32(TG3PCI_MSI_DATA);
8212                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8213                 tw32(TG3PCI_MSI_DATA, val);
8214         }
8215
8216         /* Descriptor ring init may make accesses to the
8217          * NIC SRAM area to setup the TX descriptors, so we
8218          * can only do this after the hardware has been
8219          * successfully reset.
8220          */
8221         err = tg3_init_rings(tp);
8222         if (err)
8223                 return err;
8224
8225         if (tg3_flag(tp, 57765_PLUS)) {
8226                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8227                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8228                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8229                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8230                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8231                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8232                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8233                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8234         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8235                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8236                 /* This value is determined during the probe-time DMA
8237                  * engine test, tg3_test_dma.
8238                  */
8239                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8240         }
8241
8242         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8243                           GRC_MODE_4X_NIC_SEND_RINGS |
8244                           GRC_MODE_NO_TX_PHDR_CSUM |
8245                           GRC_MODE_NO_RX_PHDR_CSUM);
8246         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8247
8248         /* Pseudo-header checksum is done by hardware logic and not
8249          * the offload processors, so make the chip do the pseudo-
8250          * header checksums on receive.  For transmit it is more
8251          * convenient to do the pseudo-header checksum in software
8252          * as Linux does that on transmit for us in all cases.
8253          */
8254         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8255
8256         tw32(GRC_MODE,
8257              tp->grc_mode |
8258              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8259
8260         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8261         val = tr32(GRC_MISC_CFG);
8262         val &= ~0xff;
8263         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
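        /* A value of 65 presumably divides the 66 MHz clock by 66,
         * giving a 1 us timer tick.
         */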
8264         tw32(GRC_MISC_CFG, val);
8265
8266         /* Initialize MBUF/DESC pool. */
8267         if (tg3_flag(tp, 5750_PLUS)) {
8268                 /* Do nothing.  */
8269         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8270                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8271                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8272                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8273                 else
8274                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8275                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8276                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8277         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8278                 int fw_len;
8279
8280                 fw_len = tp->fw_len;
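                /* Round the firmware length up to a 128-byte boundary. */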
8281                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8282                 tw32(BUFMGR_MB_POOL_ADDR,
8283                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8284                 tw32(BUFMGR_MB_POOL_SIZE,
8285                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8286         }
8287
8288         if (tp->dev->mtu <= ETH_DATA_LEN) {
8289                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8290                      tp->bufmgr_config.mbuf_read_dma_low_water);
8291                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8292                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8293                 tw32(BUFMGR_MB_HIGH_WATER,
8294                      tp->bufmgr_config.mbuf_high_water);
8295         } else {
8296                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8297                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8298                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8299                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8300                 tw32(BUFMGR_MB_HIGH_WATER,
8301                      tp->bufmgr_config.mbuf_high_water_jumbo);
8302         }
8303         tw32(BUFMGR_DMA_LOW_WATER,
8304              tp->bufmgr_config.dma_low_water);
8305         tw32(BUFMGR_DMA_HIGH_WATER,
8306              tp->bufmgr_config.dma_high_water);
8307
8308         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8309         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8310                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8311         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8312             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8313             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8314                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8315         tw32(BUFMGR_MODE, val);
8316         for (i = 0; i < 2000; i++) {
8317                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8318                         break;
8319                 udelay(10);
8320         }
8321         if (i >= 2000) {
8322                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8323                 return -ENODEV;
8324         }
8325
8326         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8327                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8328
8329         tg3_setup_rxbd_thresholds(tp);
8330
8331         /* Initialize TG3_BDINFO's at:
8332          *  RCVDBDI_STD_BD:     standard eth size rx ring
8333          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8334          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8335          *
8336          * like so:
8337          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8338          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8339          *                              ring attribute flags
8340          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8341          *
8342          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8343          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8344          *
8345          * The size of each ring is fixed in the firmware, but the location is
8346          * configurable.
8347          */
8348         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8349              ((u64) tpr->rx_std_mapping >> 32));
8350         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8351              ((u64) tpr->rx_std_mapping & 0xffffffff));
8352         if (!tg3_flag(tp, 5717_PLUS))
8353                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8354                      NIC_SRAM_RX_BUFFER_DESC);
8355
8356         /* Disable the mini ring */
8357         if (!tg3_flag(tp, 5705_PLUS))
8358                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8359                      BDINFO_FLAGS_DISABLED);
8360
8361         /* Program the jumbo buffer descriptor ring control
8362          * blocks on those devices that have them.
8363          */
8364         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8365             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8366
8367                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8368                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8369                              ((u64) tpr->rx_jmb_mapping >> 32));
8370                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8371                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8372                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8373                               BDINFO_FLAGS_MAXLEN_SHIFT;
8374                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8375                              val | BDINFO_FLAGS_USE_EXT_RECV);
8376                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8377                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8378                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8379                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8380                 } else {
8381                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8382                              BDINFO_FLAGS_DISABLED);
8383                 }
8384
8385                 if (tg3_flag(tp, 57765_PLUS)) {
8386                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8387                                 val = TG3_RX_STD_MAX_SIZE_5700;
8388                         else
8389                                 val = TG3_RX_STD_MAX_SIZE_5717;
8390                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8391                         val |= (TG3_RX_STD_DMA_SZ << 2);
8392                 } else
8393                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8394         } else
8395                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8396
8397         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8398
8399         tpr->rx_std_prod_idx = tp->rx_pending;
8400         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8401
8402         tpr->rx_jmb_prod_idx =
8403                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8404         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8405
8406         tg3_rings_reset(tp);
8407
8408         /* Initialize MAC address and backoff seed. */
8409         __tg3_set_mac_addr(tp, 0);
8410
8411         /* MTU + ethernet header + FCS + optional VLAN tag */
8412         tw32(MAC_RX_MTU_SIZE,
8413              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8414
8415         /* The slot time is changed by tg3_setup_phy if we
8416          * run at gigabit with half duplex.
8417          */
8418         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8419               (6 << TX_LENGTHS_IPG_SHIFT) |
8420               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8421
8422         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8423                 val |= tr32(MAC_TX_LENGTHS) &
8424                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8425                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8426
8427         tw32(MAC_TX_LENGTHS, val);
8428
8429         /* Receive rules. */
8430         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8431         tw32(RCVLPC_CONFIG, 0x0181);
8432
8433         /* Calculate RDMAC_MODE setting early, we need it to determine
8434          * the RCVLPC_STATE_ENABLE mask.
8435          */
8436         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8437                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8438                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8439                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8440                       RDMAC_MODE_LNGREAD_ENAB);
8441
8442         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8443                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8444
8445         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8446             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8447             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8448                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8449                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8450                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8451
8452         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8453             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8454                 if (tg3_flag(tp, TSO_CAPABLE) &&
8455                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8456                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8457                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8458                            !tg3_flag(tp, IS_5788)) {
8459                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8460                 }
8461         }
8462
8463         if (tg3_flag(tp, PCI_EXPRESS))
8464                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8465
8466         if (tg3_flag(tp, HW_TSO_1) ||
8467             tg3_flag(tp, HW_TSO_2) ||
8468             tg3_flag(tp, HW_TSO_3))
8469                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8470
8471         if (tg3_flag(tp, 57765_PLUS) ||
8472             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8473             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8474                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8475
8476         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8477                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8478
8479         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8480             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8481             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8482             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8483             tg3_flag(tp, 57765_PLUS)) {
8484                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8485                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8486                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8487                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8488                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8489                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8490                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8491                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8492                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8493                 }
8494                 tw32(TG3_RDMA_RSRVCTRL_REG,
8495                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8496         }
8497
8498         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8499             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8500                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8501                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8502                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8503                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8504         }
8505
8506         /* Receive/send statistics. */
8507         if (tg3_flag(tp, 5750_PLUS)) {
8508                 val = tr32(RCVLPC_STATS_ENABLE);
8509                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8510                 tw32(RCVLPC_STATS_ENABLE, val);
8511         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8512                    tg3_flag(tp, TSO_CAPABLE)) {
8513                 val = tr32(RCVLPC_STATS_ENABLE);
8514                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8515                 tw32(RCVLPC_STATS_ENABLE, val);
8516         } else {
8517                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8518         }
8519         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8520         tw32(SNDDATAI_STATSENAB, 0xffffff);
8521         tw32(SNDDATAI_STATSCTRL,
8522              (SNDDATAI_SCTRL_ENABLE |
8523               SNDDATAI_SCTRL_FASTUPD));
8524
8525         /* Setup host coalescing engine. */
8526         tw32(HOSTCC_MODE, 0);
8527         for (i = 0; i < 2000; i++) {
8528                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8529                         break;
8530                 udelay(10);
8531         }
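        /* The loop above waits up to ~20 ms for the coalescing engine to
         * report itself disabled before it is reprogrammed below.
         */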
8532
8533         __tg3_set_coalesce(tp, &tp->coal);
8534
8535         if (!tg3_flag(tp, 5705_PLUS)) {
8536                 /* Status/statistics block address.  See tg3_timer,
8537                  * the tg3_periodic_fetch_stats call there, and
8538                  * tg3_get_stats to see how this works for 5705/5750 chips.
8539                  */
8540                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8541                      ((u64) tp->stats_mapping >> 32));
8542                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8543                      ((u64) tp->stats_mapping & 0xffffffff));
8544                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8545
8546                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8547
8548                 /* Clear statistics and status block memory areas */
8549                 for (i = NIC_SRAM_STATS_BLK;
8550                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8551                      i += sizeof(u32)) {
8552                         tg3_write_mem(tp, i, 0);
8553                         udelay(40);
8554                 }
8555         }
8556
8557         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8558
8559         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8560         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8561         if (!tg3_flag(tp, 5705_PLUS))
8562                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8563
8564         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8565                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8566                 /* reset to prevent losing 1st rx packet intermittently */
8567                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8568                 udelay(10);
8569         }
8570
8571         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8572                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8573                         MAC_MODE_FHDE_ENABLE;
8574         if (tg3_flag(tp, ENABLE_APE))
8575                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8576         if (!tg3_flag(tp, 5705_PLUS) &&
8577             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8578             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8579                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8580         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8581         udelay(40);
8582
8583         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8584          * If TG3_FLAG_IS_NIC is zero, we should read the
8585          * register to preserve the GPIO settings for LOMs. The GPIOs,
8586          * whether used as inputs or outputs, are set by boot code after
8587          * reset.
8588          */
8589         if (!tg3_flag(tp, IS_NIC)) {
8590                 u32 gpio_mask;
8591
8592                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8593                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8594                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8595
8596                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8597                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8598                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8599
8600                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8601                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8602
8603                 tp->grc_local_ctrl &= ~gpio_mask;
8604                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8605
8606                 /* GPIO1 must be driven high for eeprom write protect */
8607                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8608                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8609                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8610         }
8611         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8612         udelay(100);
8613
8614         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8615                 val = tr32(MSGINT_MODE);
8616                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8617                 tw32(MSGINT_MODE, val);
8618         }
8619
8620         if (!tg3_flag(tp, 5705_PLUS)) {
8621                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8622                 udelay(40);
8623         }
8624
8625         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8626                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8627                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8628                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8629                WDMAC_MODE_LNGREAD_ENAB);
8630
8631         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8632             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8633                 if (tg3_flag(tp, TSO_CAPABLE) &&
8634                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8635                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8636                         /* nothing */
8637                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8638                            !tg3_flag(tp, IS_5788)) {
8639                         val |= WDMAC_MODE_RX_ACCEL;
8640                 }
8641         }
8642
8643         /* Enable host coalescing bug fix */
8644         if (tg3_flag(tp, 5755_PLUS))
8645                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8646
8647         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8648                 val |= WDMAC_MODE_BURST_ALL_DATA;
8649
8650         tw32_f(WDMAC_MODE, val);
8651         udelay(40);
8652
8653         if (tg3_flag(tp, PCIX_MODE)) {
8654                 u16 pcix_cmd;
8655
8656                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8657                                      &pcix_cmd);
8658                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8659                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8660                         pcix_cmd |= PCI_X_CMD_READ_2K;
8661                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8662                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8663                         pcix_cmd |= PCI_X_CMD_READ_2K;
8664                 }
8665                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8666                                       pcix_cmd);
8667         }
8668
8669         tw32_f(RDMAC_MODE, rdmac_mode);
8670         udelay(40);
8671
8672         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8673         if (!tg3_flag(tp, 5705_PLUS))
8674                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8675
8676         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8677                 tw32(SNDDATAC_MODE,
8678                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8679         else
8680                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8681
8682         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8683         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8684         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8685         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8686                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8687         tw32(RCVDBDI_MODE, val);
8688         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8689         if (tg3_flag(tp, HW_TSO_1) ||
8690             tg3_flag(tp, HW_TSO_2) ||
8691             tg3_flag(tp, HW_TSO_3))
8692                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8693         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8694         if (tg3_flag(tp, ENABLE_TSS))
8695                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8696         tw32(SNDBDI_MODE, val);
8697         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8698
8699         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8700                 err = tg3_load_5701_a0_firmware_fix(tp);
8701                 if (err)
8702                         return err;
8703         }
8704
8705         if (tg3_flag(tp, TSO_CAPABLE)) {
8706                 err = tg3_load_tso_firmware(tp);
8707                 if (err)
8708                         return err;
8709         }
8710
8711         tp->tx_mode = TX_MODE_ENABLE;
8712
8713         if (tg3_flag(tp, 5755_PLUS) ||
8714             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8715                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8716
8717         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8718                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8719                 tp->tx_mode &= ~val;
8720                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8721         }
8722
8723         tw32_f(MAC_TX_MODE, tp->tx_mode);
8724         udelay(100);
8725
8726         if (tg3_flag(tp, ENABLE_RSS)) {
8727                 int i = 0;
8728                 u32 reg = MAC_RSS_INDIR_TBL_0;
8729
8730                 if (tp->irq_cnt == 2) {
8731                         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8732                                 tw32(reg, 0x0);
8733                                 reg += 4;
8734                         }
8735                 } else {
8736                         u32 val;
8737
8738                         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8739                                 val = i % (tp->irq_cnt - 1);
8740                                 i++;
8741                                 for (; i % 8; i++) {
8742                                         val <<= 4;
8743                                         val |= (i % (tp->irq_cnt - 1));
8744                                 }
8745                                 tw32(reg, val);
8746                                 reg += 4;
8747                         }
8748                 }
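                /* Illustrative example: with irq_cnt == 5 (four rx queues)
                 * the entries cycle 0,1,2,3,0,1,2,3,... and each 32-bit
                 * register packs eight 4-bit entries, first entry in the
                 * most significant nibble, so the loop above writes
                 * 0x01230123 into every indirection table register.
                 */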
8749
8750                 /* Setup the "secret" hash key. */
8751                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8752                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8753                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8754                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8755                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8756                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8757                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8758                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8759                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8760                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8761         }
8762
8763         tp->rx_mode = RX_MODE_ENABLE;
8764         if (tg3_flag(tp, 5755_PLUS))
8765                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8766
8767         if (tg3_flag(tp, ENABLE_RSS))
8768                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8769                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8770                                RX_MODE_RSS_IPV6_HASH_EN |
8771                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8772                                RX_MODE_RSS_IPV4_HASH_EN |
8773                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8774
8775         tw32_f(MAC_RX_MODE, tp->rx_mode);
8776         udelay(10);
8777
8778         tw32(MAC_LED_CTRL, tp->led_ctrl);
8779
8780         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8781         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8782                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8783                 udelay(10);
8784         }
8785         tw32_f(MAC_RX_MODE, tp->rx_mode);
8786         udelay(10);
8787
8788         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8789                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8790                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8791                         /* Set drive transmission level to 1.2V  */
8792                         /* only if the signal pre-emphasis bit is not set  */
8793                         val = tr32(MAC_SERDES_CFG);
8794                         val &= 0xfffff000;
8795                         val |= 0x880;
8796                         tw32(MAC_SERDES_CFG, val);
8797                 }
8798                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8799                         tw32(MAC_SERDES_CFG, 0x616000);
8800         }
8801
8802         /* Prevent chip from dropping frames when flow control
8803          * is enabled.
8804          */
8805         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8806                 val = 1;
8807         else
8808                 val = 2;
8809         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8810
8811         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8812             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8813                 /* Use hardware link auto-negotiation */
8814                 tg3_flag_set(tp, HW_AUTONEG);
8815         }
8816
8817         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8818             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8819                 u32 tmp;
8820
8821                 tmp = tr32(SERDES_RX_CTRL);
8822                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8823                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8824                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8825                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8826         }
8827
8828         if (!tg3_flag(tp, USE_PHYLIB)) {
8829                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8830                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8831                         tp->link_config.speed = tp->link_config.orig_speed;
8832                         tp->link_config.duplex = tp->link_config.orig_duplex;
8833                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8834                 }
8835
8836                 err = tg3_setup_phy(tp, 0);
8837                 if (err)
8838                         return err;
8839
8840                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8841                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8842                         u32 tmp;
8843
8844                         /* Clear CRC stats. */
8845                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8846                                 tg3_writephy(tp, MII_TG3_TEST1,
8847                                              tmp | MII_TG3_TEST1_CRC_EN);
8848                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8849                         }
8850                 }
8851         }
8852
8853         __tg3_set_rx_mode(tp->dev);
8854
8855         /* Initialize receive rules. */
8856         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8857         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8858         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8859         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8860
8861         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8862                 limit = 8;
8863         else
8864                 limit = 16;
8865         if (tg3_flag(tp, ENABLE_ASF))
8866                 limit -= 4;
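        /* The switch below relies on intentional fall-through: entering
         * at 'limit' clears every rule from limit - 1 down to 4.  Rules 0
         * and 1 were programmed above; rules 2 and 3 are deliberately left
         * alone (see the commented-out cases).
         */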
8867         switch (limit) {
8868         case 16:
8869                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8870         case 15:
8871                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8872         case 14:
8873                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8874         case 13:
8875                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8876         case 12:
8877                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8878         case 11:
8879                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8880         case 10:
8881                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8882         case 9:
8883                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8884         case 8:
8885                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8886         case 7:
8887                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8888         case 6:
8889                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8890         case 5:
8891                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8892         case 4:
8893                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8894         case 3:
8895                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8896         case 2:
8897         case 1:
8898
8899         default:
8900                 break;
8901         }
8902
8903         if (tg3_flag(tp, ENABLE_APE))
8904                 /* Write our heartbeat update interval to APE. */
8905                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8906                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8907
8908         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8909
8910         return 0;
8911 }
8912
8913 /* Called at device open time to get the chip ready for
8914  * packet processing.  Invoked with tp->lock held.
8915  */
8916 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8917 {
8918         tg3_switch_clocks(tp);
8919
8920         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8921
8922         return tg3_reset_hw(tp, reset_phy);
8923 }
8924
8925 #define TG3_STAT_ADD32(PSTAT, REG) \
8926 do {    u32 __val = tr32(REG); \
8927         (PSTAT)->low += __val; \
8928         if ((PSTAT)->low < __val) \
8929                 (PSTAT)->high += 1; \
8930 } while (0)
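/* Worked example of the carry detection above: if (PSTAT)->low is
 * 0xfffffff8 and the register reads 0x10, the addition wraps low around
 * to 0x8; since 0x8 < 0x10, the wrap is detected and high is bumped.
 */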
8931
8932 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8933 {
8934         struct tg3_hw_stats *sp = tp->hw_stats;
8935
8936         if (!netif_carrier_ok(tp->dev))
8937                 return;
8938
8939         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8940         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8941         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8942         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8943         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8944         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8945         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8946         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8947         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8948         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8949         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8950         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8951         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8952
8953         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8954         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8955         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8956         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8957         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8958         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8959         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8960         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8961         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8962         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8963         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8964         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8965         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8966         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8967
8968         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8969         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8970             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8971             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8972                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8973         } else {
8974                 u32 val = tr32(HOSTCC_FLOW_ATTN);
8975                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8976                 if (val) {
8977                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8978                         sp->rx_discards.low += val;
8979                         if (sp->rx_discards.low < val)
8980                                 sp->rx_discards.high += 1;
8981                 }
8982                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8983         }
8984         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8985 }
8986
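/* Work around lost MSIs on some chips: if a vector has work pending but
 * neither its rx nor its tx consumer index has moved since the previous
 * timer tick, assume the interrupt was missed and rewrite the interrupt
 * mailbox to re-trigger it.  chk_msi_cnt grants one tick of grace before
 * the kick.
 */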
8987 static void tg3_chk_missed_msi(struct tg3 *tp)
8988 {
8989         u32 i;
8990
8991         for (i = 0; i < tp->irq_cnt; i++) {
8992                 struct tg3_napi *tnapi = &tp->napi[i];
8993
8994                 if (tg3_has_work(tnapi)) {
8995                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8996                             tnapi->last_tx_cons == tnapi->tx_cons) {
8997                                 if (tnapi->chk_msi_cnt < 1) {
8998                                         tnapi->chk_msi_cnt++;
8999                                         return;
9000                                 }
9001                                 tw32_mailbox(tnapi->int_mbox,
9002                                              tnapi->last_tag << 24);
9003                         }
9004                 }
9005                 tnapi->chk_msi_cnt = 0;
9006                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9007                 tnapi->last_tx_cons = tnapi->tx_cons;
9008         }
9009 }
9010
9011 static void tg3_timer(unsigned long __opaque)
9012 {
9013         struct tg3 *tp = (struct tg3 *) __opaque;
9014
9015         if (tp->irq_sync)
9016                 goto restart_timer;
9017
9018         spin_lock(&tp->lock);
9019
9020         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9021             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9022                 tg3_chk_missed_msi(tp);
9023
9024         if (!tg3_flag(tp, TAGGED_STATUS)) {
9025                 /* All of this garbage is because, when using non-tagged
9026                  * IRQ status, the mailbox/status_block protocol the chip
9027                  * uses with the CPU is race prone.
9028                  */
9029                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9030                         tw32(GRC_LOCAL_CTRL,
9031                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9032                 } else {
9033                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9034                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9035                 }
9036
9037                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9038                         tg3_flag_set(tp, RESTART_TIMER);
9039                         spin_unlock(&tp->lock);
9040                         schedule_work(&tp->reset_task);
9041                         return;
9042                 }
9043         }
9044
9045         /* This part only runs once per second. */
9046         if (!--tp->timer_counter) {
9047                 if (tg3_flag(tp, 5705_PLUS))
9048                         tg3_periodic_fetch_stats(tp);
9049
9050                 if (tp->setlpicnt && !--tp->setlpicnt)
9051                         tg3_phy_eee_enable(tp);
9052
9053                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9054                         u32 mac_stat;
9055                         int phy_event;
9056
9057                         mac_stat = tr32(MAC_STATUS);
9058
9059                         phy_event = 0;
9060                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9061                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9062                                         phy_event = 1;
9063                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9064                                 phy_event = 1;
9065
9066                         if (phy_event)
9067                                 tg3_setup_phy(tp, 0);
9068                 } else if (tg3_flag(tp, POLL_SERDES)) {
9069                         u32 mac_stat = tr32(MAC_STATUS);
9070                         int need_setup = 0;
9071
9072                         if (netif_carrier_ok(tp->dev) &&
9073                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9074                                 need_setup = 1;
9075                         }
9076                         if (!netif_carrier_ok(tp->dev) &&
9077                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9078                                          MAC_STATUS_SIGNAL_DET))) {
9079                                 need_setup = 1;
9080                         }
9081                         if (need_setup) {
9082                                 if (!tp->serdes_counter) {
9083                                         tw32_f(MAC_MODE,
9084                                              (tp->mac_mode &
9085                                               ~MAC_MODE_PORT_MODE_MASK));
9086                                         udelay(40);
9087                                         tw32_f(MAC_MODE, tp->mac_mode);
9088                                         udelay(40);
9089                                 }
9090                                 tg3_setup_phy(tp, 0);
9091                         }
9092                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9093                            tg3_flag(tp, 5780_CLASS)) {
9094                         tg3_serdes_parallel_detect(tp);
9095                 }
9096
9097                 tp->timer_counter = tp->timer_multiplier;
9098         }
9099
9100         /* Heartbeat is only sent once every 2 seconds.
9101          *
9102          * The heartbeat is to tell the ASF firmware that the host
9103          * driver is still alive.  In the event that the OS crashes,
9104          * ASF needs to reset the hardware to free up the FIFO space
9105          * that may be filled with rx packets destined for the host.
9106          * If the FIFO is full, ASF will no longer function properly.
9107          *
9108          * Unintended resets have been reported on real time kernels
9109          * where the timer doesn't run on time.  Netpoll will also have
9110          * the same problem.
9111          *
9112          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9113          * to check the ring condition when the heartbeat is expiring
9114          * before doing the reset.  This will prevent most unintended
9115          * resets.
9116          */
9117         if (!--tp->asf_counter) {
9118                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9119                         tg3_wait_for_event_ack(tp);
9120
9121                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9122                                       FWCMD_NICDRV_ALIVE3);
9123                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9124                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9125                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9126
9127                         tg3_generate_fw_event(tp);
9128                 }
9129                 tp->asf_counter = tp->asf_multiplier;
9130         }
9131
9132         spin_unlock(&tp->lock);
9133
9134 restart_timer:
9135         tp->timer.expires = jiffies + tp->timer_offset;
9136         add_timer(&tp->timer);
9137 }
9138
9139 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9140 {
9141         irq_handler_t fn;
9142         unsigned long flags;
9143         char *name;
9144         struct tg3_napi *tnapi = &tp->napi[irq_num];
9145
9146         if (tp->irq_cnt == 1)
9147                 name = tp->dev->name;
9148         else {
9149                 name = &tnapi->irq_lbl[0];
9150                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9151                 name[IFNAMSIZ-1] = 0;
9152         }
9153
9154         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9155                 fn = tg3_msi;
9156                 if (tg3_flag(tp, 1SHOT_MSI))
9157                         fn = tg3_msi_1shot;
9158                 flags = 0;
9159         } else {
9160                 fn = tg3_interrupt;
9161                 if (tg3_flag(tp, TAGGED_STATUS))
9162                         fn = tg3_interrupt_tagged;
9163                 flags = IRQF_SHARED;
9164         }
9165
9166         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9167 }
9168
9169 static int tg3_test_interrupt(struct tg3 *tp)
9170 {
9171         struct tg3_napi *tnapi = &tp->napi[0];
9172         struct net_device *dev = tp->dev;
9173         int err, i, intr_ok = 0;
9174         u32 val;
9175
9176         if (!netif_running(dev))
9177                 return -ENODEV;
9178
9179         tg3_disable_ints(tp);
9180
9181         free_irq(tnapi->irq_vec, tnapi);
9182
9183         /*
9184          * Turn off MSI one shot mode.  Otherwise this test has no
9185          * observable way to know whether the interrupt was delivered.
9186          */
9187         if (tg3_flag(tp, 57765_PLUS)) {
9188                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9189                 tw32(MSGINT_MODE, val);
9190         }
9191
9192         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9193                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9194         if (err)
9195                 return err;
9196
9197         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9198         tg3_enable_ints(tp);
9199
9200         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9201                tnapi->coal_now);
9202
9203         for (i = 0; i < 5; i++) {
9204                 u32 int_mbox, misc_host_ctrl;
9205
9206                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9207                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9208
9209                 if ((int_mbox != 0) ||
9210                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9211                         intr_ok = 1;
9212                         break;
9213                 }
9214
9215                 if (tg3_flag(tp, 57765_PLUS) &&
9216                     tnapi->hw_status->status_tag != tnapi->last_tag)
9217                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9218
9219                 msleep(10);
9220         }
9221
9222         tg3_disable_ints(tp);
9223
9224         free_irq(tnapi->irq_vec, tnapi);
9225
9226         err = tg3_request_irq(tp, 0);
9227
9228         if (err)
9229                 return err;
9230
9231         if (intr_ok) {
9232                 /* Reenable MSI one shot mode. */
9233                 if (tg3_flag(tp, 57765_PLUS)) {
9234                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9235                         tw32(MSGINT_MODE, val);
9236                 }
9237                 return 0;
9238         }
9239
9240         return -EIO;
9241 }
9242
9243 /* Returns 0 if the MSI test succeeds, or if it fails and INTx mode is
9244  * successfully restored.
9245  */
9246 static int tg3_test_msi(struct tg3 *tp)
9247 {
9248         int err;
9249         u16 pci_cmd;
9250
9251         if (!tg3_flag(tp, USING_MSI))
9252                 return 0;
9253
9254         /* Turn off SERR reporting in case MSI terminates with Master
9255          * Abort.
9256          */
9257         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9258         pci_write_config_word(tp->pdev, PCI_COMMAND,
9259                               pci_cmd & ~PCI_COMMAND_SERR);
9260
9261         err = tg3_test_interrupt(tp);
9262
9263         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9264
9265         if (!err)
9266                 return 0;
9267
9268         /* other failures */
9269         if (err != -EIO)
9270                 return err;
9271
9272         /* MSI test failed, go back to INTx mode */
9273         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9274                     "to INTx mode. Please report this failure to the PCI "
9275                     "maintainer and include system chipset information\n");
9276
9277         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9278
9279         pci_disable_msi(tp->pdev);
9280
9281         tg3_flag_clear(tp, USING_MSI);
9282         tp->napi[0].irq_vec = tp->pdev->irq;
9283
9284         err = tg3_request_irq(tp, 0);
9285         if (err)
9286                 return err;
9287
9288         /* Need to reset the chip because the MSI cycle may have terminated
9289          * with Master Abort.
9290          */
9291         tg3_full_lock(tp, 1);
9292
9293         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9294         err = tg3_init_hw(tp, 1);
9295
9296         tg3_full_unlock(tp);
9297
9298         if (err)
9299                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9300
9301         return err;
9302 }
9303
9304 static int tg3_request_firmware(struct tg3 *tp)
9305 {
9306         const __be32 *fw_data;
9307
9308         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9309                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9310                            tp->fw_needed);
9311                 return -ENOENT;
9312         }
9313
9314         fw_data = (void *)tp->fw->data;
9315
9316         /* Firmware blob starts with version numbers, followed by
9317          * start address and _full_ length including BSS sections
9318          * (which must be at least as long as the actual data, of course).
9319          */
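        /* A sketch of the header described above (fw_data[] is big-endian
         * u32): fw_data[0] holds the version, fw_data[1] the start address,
         * fw_data[2] the full length including BSS, and the remaining
         * tp->fw->size - 12 bytes are the loadable image.
         */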
9320
9321         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9322         if (tp->fw_len < (tp->fw->size - 12)) {
9323                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9324                            tp->fw_len, tp->fw_needed);
9325                 release_firmware(tp->fw);
9326                 tp->fw = NULL;
9327                 return -EINVAL;
9328         }
9329
9330         /* We no longer need firmware; we have it. */
9331         tp->fw_needed = NULL;
9332         return 0;
9333 }
9334
9335 static bool tg3_enable_msix(struct tg3 *tp)
9336 {
9337         int i, rc, cpus = num_online_cpus();
9338         struct msix_entry msix_ent[tp->irq_max];
9339
9340         if (cpus == 1)
9341                 /* Just fall back to the simpler MSI mode. */
9342                 return false;
9343
9344         /*
9345          * We want as many rx rings enabled as there are cpus.
9346          * The first MSIX vector only deals with link interrupts, etc,
9347          * so we add one to the number of vectors we are requesting.
9348          */
9349         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9350
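        /* Example under the policy above: on a 4-CPU system with
         * irq_max >= 5, five vectors are requested -- vector 0 for link
         * and other non-rx events, vectors 1-4 for the rx rings.
         */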
9351         for (i = 0; i < tp->irq_max; i++) {
9352                 msix_ent[i].entry  = i;
9353                 msix_ent[i].vector = 0;
9354         }
9355
9356         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9357         if (rc < 0) {
9358                 return false;
9359         } else if (rc != 0) {
9360                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9361                         return false;
9362                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9363                               tp->irq_cnt, rc);
9364                 tp->irq_cnt = rc;
9365         }
9366
9367         for (i = 0; i < tp->irq_max; i++)
9368                 tp->napi[i].irq_vec = msix_ent[i].vector;
9369
9370         netif_set_real_num_tx_queues(tp->dev, 1);
9371         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9372         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9373                 pci_disable_msix(tp->pdev);
9374                 return false;
9375         }
9376
9377         if (tp->irq_cnt > 1) {
9378                 tg3_flag_set(tp, ENABLE_RSS);
9379
9380                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9381                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9382                         tg3_flag_set(tp, ENABLE_TSS);
9383                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9384                 }
9385         }
9386
9387         return true;
9388 }
9389
9390 static void tg3_ints_init(struct tg3 *tp)
9391 {
9392         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9393             !tg3_flag(tp, TAGGED_STATUS)) {
9394                 /* All MSI supporting chips should support tagged
9395                  * status.  Assert that this is the case.
9396                  */
9397                 netdev_warn(tp->dev,
9398                             "MSI without TAGGED_STATUS? Not using MSI\n");
9399                 goto defcfg;
9400         }
9401
9402         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9403                 tg3_flag_set(tp, USING_MSIX);
9404         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9405                 tg3_flag_set(tp, USING_MSI);
9406
9407         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9408                 u32 msi_mode = tr32(MSGINT_MODE);
9409                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9410                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9411                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9412         }
9413 defcfg:
9414         if (!tg3_flag(tp, USING_MSIX)) {
9415                 tp->irq_cnt = 1;
9416                 tp->napi[0].irq_vec = tp->pdev->irq;
9417                 netif_set_real_num_tx_queues(tp->dev, 1);
9418                 netif_set_real_num_rx_queues(tp->dev, 1);
9419         }
9420 }
9421
9422 static void tg3_ints_fini(struct tg3 *tp)
9423 {
9424         if (tg3_flag(tp, USING_MSIX))
9425                 pci_disable_msix(tp->pdev);
9426         else if (tg3_flag(tp, USING_MSI))
9427                 pci_disable_msi(tp->pdev);
9428         tg3_flag_clear(tp, USING_MSI);
9429         tg3_flag_clear(tp, USING_MSIX);
9430         tg3_flag_clear(tp, ENABLE_RSS);
9431         tg3_flag_clear(tp, ENABLE_TSS);
9432 }
9433
9434 static int tg3_open(struct net_device *dev)
9435 {
9436         struct tg3 *tp = netdev_priv(dev);
9437         int i, err;
9438
9439         if (tp->fw_needed) {
9440                 err = tg3_request_firmware(tp);
9441                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9442                         if (err)
9443                                 return err;
9444                 } else if (err) {
9445                         netdev_warn(tp->dev, "TSO capability disabled\n");
9446                         tg3_flag_clear(tp, TSO_CAPABLE);
9447                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9448                         netdev_notice(tp->dev, "TSO capability restored\n");
9449                         tg3_flag_set(tp, TSO_CAPABLE);
9450                 }
9451         }
9452
9453         netif_carrier_off(tp->dev);
9454
9455         err = tg3_power_up(tp);
9456         if (err)
9457                 return err;
9458
9459         tg3_full_lock(tp, 0);
9460
9461         tg3_disable_ints(tp);
9462         tg3_flag_clear(tp, INIT_COMPLETE);
9463
9464         tg3_full_unlock(tp);
9465
9466         /*
9467          * Set up interrupts first so we know how
9468          * many NAPI resources to allocate.
9469          */
9470         tg3_ints_init(tp);
9471
9472         /* The placement of this call is tied
9473          * to the setup and use of Host TX descriptors.
9474          */
9475         err = tg3_alloc_consistent(tp);
9476         if (err)
9477                 goto err_out1;
9478
9479         tg3_napi_init(tp);
9480
9481         tg3_napi_enable(tp);
9482
9483         for (i = 0; i < tp->irq_cnt; i++) {
9484                 err = tg3_request_irq(tp, i);
9485                 if (err) {
9486                         /* unwind the vectors requested so far */
9487                         for (i--; i >= 0; i--)
9488                                 free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
9489                         break;
9490                 }
9491         }
9492
9493         if (err)
9494                 goto err_out2;
9495
9496         tg3_full_lock(tp, 0);
9497
9498         err = tg3_init_hw(tp, 1);
9499         if (err) {
9500                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9501                 tg3_free_rings(tp);
9502         } else {
9503                 if (tg3_flag(tp, TAGGED_STATUS) &&
9504                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9505                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9506                         tp->timer_offset = HZ;
9507                 else
9508                         tp->timer_offset = HZ / 10;
9509
9510                 BUG_ON(tp->timer_offset > HZ);
9511                 tp->timer_counter = tp->timer_multiplier =
9512                         (HZ / tp->timer_offset);
9513                 tp->asf_counter = tp->asf_multiplier =
9514                         ((HZ / tp->timer_offset) * 2);
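                /* Either way the math works out to a 1-second period for
                 * timer_counter (HZ / timer_offset ticks) and a 2-second
                 * period for asf_counter, matching the once-per-second and
                 * once-per-two-seconds sections of tg3_timer().
                 */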
9515
9516                 init_timer(&tp->timer);
9517                 tp->timer.expires = jiffies + tp->timer_offset;
9518                 tp->timer.data = (unsigned long) tp;
9519                 tp->timer.function = tg3_timer;
9520         }
9521
9522         tg3_full_unlock(tp);
9523
9524         if (err)
9525                 goto err_out3;
9526
9527         if (tg3_flag(tp, USING_MSI)) {
9528                 err = tg3_test_msi(tp);
9529
9530                 if (err) {
9531                         tg3_full_lock(tp, 0);
9532                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9533                         tg3_free_rings(tp);
9534                         tg3_full_unlock(tp);
9535
9536                         goto err_out2;
9537                 }
9538
9539                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9540                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9541
9542                         tw32(PCIE_TRANSACTION_CFG,
9543                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9544                 }
9545         }
9546
9547         tg3_phy_start(tp);
9548
9549         tg3_full_lock(tp, 0);
9550
9551         add_timer(&tp->timer);
9552         tg3_flag_set(tp, INIT_COMPLETE);
9553         tg3_enable_ints(tp);
9554
9555         tg3_full_unlock(tp);
9556
9557         netif_tx_start_all_queues(dev);
9558
9559         /*
9560          * Reset the loopback feature if it was turned on while the device
9561          * was down; make sure that it's installed properly now.
9562          */
9563         if (dev->features & NETIF_F_LOOPBACK)
9564                 tg3_set_loopback(dev, dev->features);
9565
9566         return 0;
9567
9568 err_out3:
9569         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9570                 struct tg3_napi *tnapi = &tp->napi[i];
9571                 free_irq(tnapi->irq_vec, tnapi);
9572         }
9573
9574 err_out2:
9575         tg3_napi_disable(tp);
9576         tg3_napi_fini(tp);
9577         tg3_free_consistent(tp);
9578
9579 err_out1:
9580         tg3_ints_fini(tp);
9581         tg3_frob_aux_power(tp, false);
9582         pci_set_power_state(tp->pdev, PCI_D3hot);
9583         return err;
9584 }
9585
9586 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9587                                                  struct rtnl_link_stats64 *);
9588 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9589
9590 static int tg3_close(struct net_device *dev)
9591 {
9592         int i;
9593         struct tg3 *tp = netdev_priv(dev);
9594
9595         tg3_napi_disable(tp);
9596         cancel_work_sync(&tp->reset_task);
9597
9598         netif_tx_stop_all_queues(dev);
9599
9600         del_timer_sync(&tp->timer);
9601
9602         tg3_phy_stop(tp);
9603
9604         tg3_full_lock(tp, 1);
9605
9606         tg3_disable_ints(tp);
9607
9608         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9609         tg3_free_rings(tp);
9610         tg3_flag_clear(tp, INIT_COMPLETE);
9611
9612         tg3_full_unlock(tp);
9613
9614         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9615                 struct tg3_napi *tnapi = &tp->napi[i];
9616                 free_irq(tnapi->irq_vec, tnapi);
9617         }
9618
9619         tg3_ints_fini(tp);
9620
9621         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9622
9623         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9624                sizeof(tp->estats_prev));
9625
9626         tg3_napi_fini(tp);
9627
9628         tg3_free_consistent(tp);
9629
9630         tg3_power_down(tp);
9631
9632         netif_carrier_off(tp->dev);
9633
9634         return 0;
9635 }
9636
9637 static inline u64 get_stat64(tg3_stat64_t *val)
9638 {
9639         return ((u64)val->high << 32) | ((u64)val->low);
9640 }
9641
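/* On 5700/5701 with a copper PHY, the FCS error count comes from the PHY's
 * own CRC counter: MII_TG3_TEST1_CRC_EN enables it, and each read of
 * MII_TG3_RXR_COUNTERS is added to tp->phy_crc_errors (the counter is
 * presumably cleared by the read, since every sample is accumulated).
 */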
9642 static u64 calc_crc_errors(struct tg3 *tp)
9643 {
9644         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9645
9646         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9647             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9648              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9649                 u32 val;
9650
9651                 spin_lock_bh(&tp->lock);
9652                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9653                         tg3_writephy(tp, MII_TG3_TEST1,
9654                                      val | MII_TG3_TEST1_CRC_EN);
9655                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9656                 } else
9657                         val = 0;
9658                 spin_unlock_bh(&tp->lock);
9659
9660                 tp->phy_crc_errors += val;
9661
9662                 return tp->phy_crc_errors;
9663         }
9664
9665         return get_stat64(&hw_stats->rx_fcs_errors);
9666 }
9667
9668 #define ESTAT_ADD(member) \
9669         estats->member =        old_estats->member + \
9670                                 get_stat64(&hw_stats->member)
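/* Each ethtool counter is therefore the total saved at the last close
 * (estats_prev, see tg3_close) plus the live hardware counter, so values
 * keep accumulating across close/open cycles.
 */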
9671
9672 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9673 {
9674         struct tg3_ethtool_stats *estats = &tp->estats;
9675         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9676         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9677
9678         if (!hw_stats)
9679                 return old_estats;
9680
9681         ESTAT_ADD(rx_octets);
9682         ESTAT_ADD(rx_fragments);
9683         ESTAT_ADD(rx_ucast_packets);
9684         ESTAT_ADD(rx_mcast_packets);
9685         ESTAT_ADD(rx_bcast_packets);
9686         ESTAT_ADD(rx_fcs_errors);
9687         ESTAT_ADD(rx_align_errors);
9688         ESTAT_ADD(rx_xon_pause_rcvd);
9689         ESTAT_ADD(rx_xoff_pause_rcvd);
9690         ESTAT_ADD(rx_mac_ctrl_rcvd);
9691         ESTAT_ADD(rx_xoff_entered);
9692         ESTAT_ADD(rx_frame_too_long_errors);
9693         ESTAT_ADD(rx_jabbers);
9694         ESTAT_ADD(rx_undersize_packets);
9695         ESTAT_ADD(rx_in_length_errors);
9696         ESTAT_ADD(rx_out_length_errors);
9697         ESTAT_ADD(rx_64_or_less_octet_packets);
9698         ESTAT_ADD(rx_65_to_127_octet_packets);
9699         ESTAT_ADD(rx_128_to_255_octet_packets);
9700         ESTAT_ADD(rx_256_to_511_octet_packets);
9701         ESTAT_ADD(rx_512_to_1023_octet_packets);
9702         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9703         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9704         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9705         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9706         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9707
9708         ESTAT_ADD(tx_octets);
9709         ESTAT_ADD(tx_collisions);
9710         ESTAT_ADD(tx_xon_sent);
9711         ESTAT_ADD(tx_xoff_sent);
9712         ESTAT_ADD(tx_flow_control);
9713         ESTAT_ADD(tx_mac_errors);
9714         ESTAT_ADD(tx_single_collisions);
9715         ESTAT_ADD(tx_mult_collisions);
9716         ESTAT_ADD(tx_deferred);
9717         ESTAT_ADD(tx_excessive_collisions);
9718         ESTAT_ADD(tx_late_collisions);
9719         ESTAT_ADD(tx_collide_2times);
9720         ESTAT_ADD(tx_collide_3times);
9721         ESTAT_ADD(tx_collide_4times);
9722         ESTAT_ADD(tx_collide_5times);
9723         ESTAT_ADD(tx_collide_6times);
9724         ESTAT_ADD(tx_collide_7times);
9725         ESTAT_ADD(tx_collide_8times);
9726         ESTAT_ADD(tx_collide_9times);
9727         ESTAT_ADD(tx_collide_10times);
9728         ESTAT_ADD(tx_collide_11times);
9729         ESTAT_ADD(tx_collide_12times);
9730         ESTAT_ADD(tx_collide_13times);
9731         ESTAT_ADD(tx_collide_14times);
9732         ESTAT_ADD(tx_collide_15times);
9733         ESTAT_ADD(tx_ucast_packets);
9734         ESTAT_ADD(tx_mcast_packets);
9735         ESTAT_ADD(tx_bcast_packets);
9736         ESTAT_ADD(tx_carrier_sense_errors);
9737         ESTAT_ADD(tx_discards);
9738         ESTAT_ADD(tx_errors);
9739
9740         ESTAT_ADD(dma_writeq_full);
9741         ESTAT_ADD(dma_write_prioq_full);
9742         ESTAT_ADD(rxbds_empty);
9743         ESTAT_ADD(rx_discards);
9744         ESTAT_ADD(rx_errors);
9745         ESTAT_ADD(rx_threshold_hit);
9746
9747         ESTAT_ADD(dma_readq_full);
9748         ESTAT_ADD(dma_read_prioq_full);
9749         ESTAT_ADD(tx_comp_queue_full);
9750
9751         ESTAT_ADD(ring_set_send_prod_index);
9752         ESTAT_ADD(ring_status_update);
9753         ESTAT_ADD(nic_irqs);
9754         ESTAT_ADD(nic_avoided_irqs);
9755         ESTAT_ADD(nic_tx_threshold_hit);
9756
9757         ESTAT_ADD(mbuf_lwm_thresh_hit);
9758
9759         return estats;
9760 }
9761
9762 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9763                                                  struct rtnl_link_stats64 *stats)
9764 {
9765         struct tg3 *tp = netdev_priv(dev);
9766         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9767         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9768
9769         if (!hw_stats)
9770                 return old_stats;
9771
9772         stats->rx_packets = old_stats->rx_packets +
9773                 get_stat64(&hw_stats->rx_ucast_packets) +
9774                 get_stat64(&hw_stats->rx_mcast_packets) +
9775                 get_stat64(&hw_stats->rx_bcast_packets);
9776
9777         stats->tx_packets = old_stats->tx_packets +
9778                 get_stat64(&hw_stats->tx_ucast_packets) +
9779                 get_stat64(&hw_stats->tx_mcast_packets) +
9780                 get_stat64(&hw_stats->tx_bcast_packets);
9781
9782         stats->rx_bytes = old_stats->rx_bytes +
9783                 get_stat64(&hw_stats->rx_octets);
9784         stats->tx_bytes = old_stats->tx_bytes +
9785                 get_stat64(&hw_stats->tx_octets);
9786
9787         stats->rx_errors = old_stats->rx_errors +
9788                 get_stat64(&hw_stats->rx_errors);
9789         stats->tx_errors = old_stats->tx_errors +
9790                 get_stat64(&hw_stats->tx_errors) +
9791                 get_stat64(&hw_stats->tx_mac_errors) +
9792                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9793                 get_stat64(&hw_stats->tx_discards);
9794
9795         stats->multicast = old_stats->multicast +
9796                 get_stat64(&hw_stats->rx_mcast_packets);
9797         stats->collisions = old_stats->collisions +
9798                 get_stat64(&hw_stats->tx_collisions);
9799
9800         stats->rx_length_errors = old_stats->rx_length_errors +
9801                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9802                 get_stat64(&hw_stats->rx_undersize_packets);
9803
9804         stats->rx_over_errors = old_stats->rx_over_errors +
9805                 get_stat64(&hw_stats->rxbds_empty);
9806         stats->rx_frame_errors = old_stats->rx_frame_errors +
9807                 get_stat64(&hw_stats->rx_align_errors);
9808         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9809                 get_stat64(&hw_stats->tx_discards);
9810         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9811                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9812
9813         stats->rx_crc_errors = old_stats->rx_crc_errors +
9814                 calc_crc_errors(tp);
9815
9816         stats->rx_missed_errors = old_stats->rx_missed_errors +
9817                 get_stat64(&hw_stats->rx_discards);
9818
9819         stats->rx_dropped = tp->rx_dropped;
9820
9821         return stats;
9822 }
9823
9824 static inline u32 calc_crc(unsigned char *buf, int len)
9825 {
9826         u32 reg;
9827         u32 tmp;
9828         int j, k;
9829
9830         reg = 0xffffffff;
9831
9832         for (j = 0; j < len; j++) {
9833                 reg ^= buf[j];
9834
9835                 for (k = 0; k < 8; k++) {
9836                         tmp = reg & 0x01;
9837
9838                         reg >>= 1;
9839
9840                         if (tmp)
9841                                 reg ^= 0xedb88320;
9842                 }
9843         }
9844
9845         return ~reg;
9846 }
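
/* calc_crc() above is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320) with the usual final inversion.  A minimal equivalence
 * sketch against the generic kernel helper follows; it assumes
 * <linux/crc32.h>, which this file does not include, so it is
 * illustrative only and not built.
 */
#if 0
#include <linux/crc32.h>

static void calc_crc_check(const u8 *buf, int len)
{
        /* crc32_le() seeded with ~0 and inverted at the end computes the
         * same Ethernet CRC as the bitwise loop above.
         */
        WARN_ON(calc_crc((unsigned char *)buf, len) !=
                (crc32_le(~0, buf, len) ^ 0xffffffff));
}
#endif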
9847
9848 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9849 {
9850         /* accept or reject all multicast frames */
9851         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9852         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9853         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9854         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9855 }
9856
9857 static void __tg3_set_rx_mode(struct net_device *dev)
9858 {
9859         struct tg3 *tp = netdev_priv(dev);
9860         u32 rx_mode;
9861
9862         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9863                                   RX_MODE_KEEP_VLAN_TAG);
9864
9865 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9866         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9867          * flag clear.
9868          */
9869         if (!tg3_flag(tp, ENABLE_ASF))
9870                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9871 #endif
9872
9873         if (dev->flags & IFF_PROMISC) {
9874                 /* Promiscuous mode. */
9875                 rx_mode |= RX_MODE_PROMISC;
9876         } else if (dev->flags & IFF_ALLMULTI) {
9877                 /* Accept all multicast. */
9878                 tg3_set_multi(tp, 1);
9879         } else if (netdev_mc_empty(dev)) {
9880                 /* Reject all multicast. */
9881                 tg3_set_multi(tp, 0);
9882         } else {
9883                 /* Accept one or more multicast(s). */
9884                 struct netdev_hw_addr *ha;
9885                 u32 mc_filter[4] = { 0, };
9886                 u32 regidx;
9887                 u32 bit;
9888                 u32 crc;
9889
9890                 netdev_for_each_mc_addr(ha, dev) {
9891                         crc = calc_crc(ha->addr, ETH_ALEN);
9892                         bit = ~crc & 0x7f;
9893                         regidx = (bit & 0x60) >> 5;
9894                         bit &= 0x1f;
9895                         mc_filter[regidx] |= (1 << bit);
9896                 }
9897
9898                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9899                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9900                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9901                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9902         }
9903
9904         if (rx_mode != tp->rx_mode) {
9905                 tp->rx_mode = rx_mode;
9906                 tw32_f(MAC_RX_MODE, rx_mode);
9907                 udelay(10);
9908         }
9909 }
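
/* Worked example of the hash mapping above: a multicast address whose
 * CRC gives ~crc & 0x7f == 0x47 selects bit 71 of the 128-bit filter,
 * so regidx = (0x47 & 0x60) >> 5 = 2 and bit = 0x47 & 0x1f = 7, and the
 * frame is accepted once bit 7 of MAC_HASH_REG_2 is set.  The four
 * 32-bit hash registers together form the filter.
 */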
9910
9911 static void tg3_set_rx_mode(struct net_device *dev)
9912 {
9913         struct tg3 *tp = netdev_priv(dev);
9914
9915         if (!netif_running(dev))
9916                 return;
9917
9918         tg3_full_lock(tp, 0);
9919         __tg3_set_rx_mode(dev);
9920         tg3_full_unlock(tp);
9921 }
9922
9923 static int tg3_get_regs_len(struct net_device *dev)
9924 {
9925         return TG3_REG_BLK_SIZE;
9926 }
9927
9928 static void tg3_get_regs(struct net_device *dev,
9929                 struct ethtool_regs *regs, void *_p)
9930 {
9931         struct tg3 *tp = netdev_priv(dev);
9932
9933         regs->version = 0;
9934
9935         memset(_p, 0, TG3_REG_BLK_SIZE);
9936
9937         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9938                 return;
9939
9940         tg3_full_lock(tp, 0);
9941
9942         tg3_dump_legacy_regs(tp, (u32 *)_p);
9943
9944         tg3_full_unlock(tp);
9945 }
9946
9947 static int tg3_get_eeprom_len(struct net_device *dev)
9948 {
9949         struct tg3 *tp = netdev_priv(dev);
9950
9951         return tp->nvram_size;
9952 }
9953
9954 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9955 {
9956         struct tg3 *tp = netdev_priv(dev);
9957         int ret;
9958         u8  *pd;
9959         u32 i, offset, len, b_offset, b_count;
9960         __be32 val;
9961
9962         if (tg3_flag(tp, NO_NVRAM))
9963                 return -EINVAL;
9964
9965         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9966                 return -EAGAIN;
9967
9968         offset = eeprom->offset;
9969         len = eeprom->len;
9970         eeprom->len = 0;
9971
9972         eeprom->magic = TG3_EEPROM_MAGIC;
9973
9974         if (offset & 3) {
9975                 /* adjustments to start on required 4 byte boundary */
9976                 b_offset = offset & 3;
9977                 b_count = 4 - b_offset;
9978                 if (b_count > len) {
9979                         /* i.e. offset=1 len=2 */
9980                         b_count = len;
9981                 }
9982                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9983                 if (ret)
9984                         return ret;
9985                 memcpy(data, ((char *)&val) + b_offset, b_count);
9986                 len -= b_count;
9987                 offset += b_count;
9988                 eeprom->len += b_count;
9989         }
9990
9991         /* read bytes up to the last 4 byte boundary */
9992         pd = &data[eeprom->len];
9993         for (i = 0; i < (len - (len & 3)); i += 4) {
9994                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9995                 if (ret) {
9996                         eeprom->len += i;
9997                         return ret;
9998                 }
9999                 memcpy(pd + i, &val, 4);
10000         }
10001         eeprom->len += i;
10002
10003         if (len & 3) {
10004                 /* read last bytes not ending on 4 byte boundary */
10005                 pd = &data[eeprom->len];
10006                 b_count = len & 3;
10007                 b_offset = offset + len - b_count;
10008                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10009                 if (ret)
10010                         return ret;
10011                 memcpy(pd, &val, b_count);
10012                 eeprom->len += b_count;
10013         }
10014         return 0;
10015 }
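
/* Worked example for the alignment fixups above, offset = 1 and len = 2
 * (the case noted in the comment): b_offset = 1, b_count = 4 - 1 = 3,
 * clamped to len = 2, so one 4-byte word is read at NVRAM offset 0 and
 * bytes 1..2 of it are returned; len drops to 0 and both the aligned
 * loop and the tail read are skipped.
 */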
10016
10017 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10018
10019 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10020 {
10021         struct tg3 *tp = netdev_priv(dev);
10022         int ret;
10023         u32 offset, len, b_offset, odd_len;
10024         u8 *buf;
10025         __be32 start, end;
10026
10027         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10028                 return -EAGAIN;
10029
10030         if (tg3_flag(tp, NO_NVRAM) ||
10031             eeprom->magic != TG3_EEPROM_MAGIC)
10032                 return -EINVAL;
10033
10034         offset = eeprom->offset;
10035         len = eeprom->len;
10036
10037         if ((b_offset = (offset & 3))) {
10038                 /* adjustments to start on required 4 byte boundary */
10039                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10040                 if (ret)
10041                         return ret;
10042                 len += b_offset;
10043                 offset &= ~3;
10044                 if (len < 4)
10045                         len = 4;
10046         }
10047
10048         odd_len = 0;
10049         if (len & 3) {
10050                 /* adjustments to end on required 4 byte boundary */
10051                 odd_len = 1;
10052                 len = (len + 3) & ~3;
10053                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10054                 if (ret)
10055                         return ret;
10056         }
10057
10058         buf = data;
10059         if (b_offset || odd_len) {
10060                 buf = kmalloc(len, GFP_KERNEL);
10061                 if (!buf)
10062                         return -ENOMEM;
10063                 if (b_offset)
10064                         memcpy(buf, &start, 4);
10065                 if (odd_len)
10066                         memcpy(buf+len-4, &end, 4);
10067                 memcpy(buf + b_offset, data, eeprom->len);
10068         }
10069
10070         ret = tg3_nvram_write_block(tp, offset, len, buf);
10071
10072         if (buf != data)
10073                 kfree(buf);
10074
10075         return ret;
10076 }
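
/* Worked example for the write path above, offset = 2 and len = 5: the
 * start fixup reads the word at offset 0 into 'start' and widens the
 * request to offset = 0, len = 7; the end fixup rounds len up to 8 and
 * reads the word at offset 4 into 'end'.  The bounce buffer is then
 * stitched as [start bytes 0-1][five caller bytes][end byte 7] before
 * the whole word-aligned 8-byte block is written back.
 */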
10077
10078 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10079 {
10080         struct tg3 *tp = netdev_priv(dev);
10081
10082         if (tg3_flag(tp, USE_PHYLIB)) {
10083                 struct phy_device *phydev;
10084                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10085                         return -EAGAIN;
10086                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10087                 return phy_ethtool_gset(phydev, cmd);
10088         }
10089
10090         cmd->supported = (SUPPORTED_Autoneg);
10091
10092         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10093                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10094                                    SUPPORTED_1000baseT_Full);
10095
10096         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10097                 cmd->supported |= (SUPPORTED_100baseT_Half |
10098                                    SUPPORTED_100baseT_Full |
10099                                    SUPPORTED_10baseT_Half |
10100                                    SUPPORTED_10baseT_Full |
10101                                    SUPPORTED_TP);
10102                 cmd->port = PORT_TP;
10103         } else {
10104                 cmd->supported |= SUPPORTED_FIBRE;
10105                 cmd->port = PORT_FIBRE;
10106         }
10107
10108         cmd->advertising = tp->link_config.advertising;
10109         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10110                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10111                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10112                                 cmd->advertising |= ADVERTISED_Pause;
10113                         } else {
10114                                 cmd->advertising |= ADVERTISED_Pause |
10115                                                     ADVERTISED_Asym_Pause;
10116                         }
10117                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10118                         cmd->advertising |= ADVERTISED_Asym_Pause;
10119                 }
10120         }
10121         if (netif_running(dev)) {
10122                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10123                 cmd->duplex = tp->link_config.active_duplex;
10124         } else {
10125                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10126                 cmd->duplex = DUPLEX_INVALID;
10127         }
10128         cmd->phy_address = tp->phy_addr;
10129         cmd->transceiver = XCVR_INTERNAL;
10130         cmd->autoneg = tp->link_config.autoneg;
10131         cmd->maxtxpkt = 0;
10132         cmd->maxrxpkt = 0;
10133         return 0;
10134 }
10135
10136 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10137 {
10138         struct tg3 *tp = netdev_priv(dev);
10139         u32 speed = ethtool_cmd_speed(cmd);
10140
10141         if (tg3_flag(tp, USE_PHYLIB)) {
10142                 struct phy_device *phydev;
10143                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10144                         return -EAGAIN;
10145                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10146                 return phy_ethtool_sset(phydev, cmd);
10147         }
10148
10149         if (cmd->autoneg != AUTONEG_ENABLE &&
10150             cmd->autoneg != AUTONEG_DISABLE)
10151                 return -EINVAL;
10152
10153         if (cmd->autoneg == AUTONEG_DISABLE &&
10154             cmd->duplex != DUPLEX_FULL &&
10155             cmd->duplex != DUPLEX_HALF)
10156                 return -EINVAL;
10157
10158         if (cmd->autoneg == AUTONEG_ENABLE) {
10159                 u32 mask = ADVERTISED_Autoneg |
10160                            ADVERTISED_Pause |
10161                            ADVERTISED_Asym_Pause;
10162
10163                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10164                         mask |= ADVERTISED_1000baseT_Half |
10165                                 ADVERTISED_1000baseT_Full;
10166
10167                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10168                         mask |= ADVERTISED_100baseT_Half |
10169                                 ADVERTISED_100baseT_Full |
10170                                 ADVERTISED_10baseT_Half |
10171                                 ADVERTISED_10baseT_Full |
10172                                 ADVERTISED_TP;
10173                 else
10174                         mask |= ADVERTISED_FIBRE;
10175
10176                 if (cmd->advertising & ~mask)
10177                         return -EINVAL;
10178
10179                 mask &= (ADVERTISED_1000baseT_Half |
10180                          ADVERTISED_1000baseT_Full |
10181                          ADVERTISED_100baseT_Half |
10182                          ADVERTISED_100baseT_Full |
10183                          ADVERTISED_10baseT_Half |
10184                          ADVERTISED_10baseT_Full);
10185
10186                 cmd->advertising &= mask;
10187         } else {
10188                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10189                         if (speed != SPEED_1000)
10190                                 return -EINVAL;
10191
10192                         if (cmd->duplex != DUPLEX_FULL)
10193                                 return -EINVAL;
10194                 } else {
10195                         if (speed != SPEED_100 &&
10196                             speed != SPEED_10)
10197                                 return -EINVAL;
10198                 }
10199         }
10200
10201         tg3_full_lock(tp, 0);
10202
10203         tp->link_config.autoneg = cmd->autoneg;
10204         if (cmd->autoneg == AUTONEG_ENABLE) {
10205                 tp->link_config.advertising = (cmd->advertising |
10206                                               ADVERTISED_Autoneg);
10207                 tp->link_config.speed = SPEED_INVALID;
10208                 tp->link_config.duplex = DUPLEX_INVALID;
10209         } else {
10210                 tp->link_config.advertising = 0;
10211                 tp->link_config.speed = speed;
10212                 tp->link_config.duplex = cmd->duplex;
10213         }
10214
10215         tp->link_config.orig_speed = tp->link_config.speed;
10216         tp->link_config.orig_duplex = tp->link_config.duplex;
10217         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10218
10219         if (netif_running(dev))
10220                 tg3_setup_phy(tp, 1);
10221
10222         tg3_full_unlock(tp);
10223
10224         return 0;
10225 }
10226
10227 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10228 {
10229         struct tg3 *tp = netdev_priv(dev);
10230
10231         strcpy(info->driver, DRV_MODULE_NAME);
10232         strcpy(info->version, DRV_MODULE_VERSION);
10233         strcpy(info->fw_version, tp->fw_ver);
10234         strcpy(info->bus_info, pci_name(tp->pdev));
10235 }
10236
10237 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10238 {
10239         struct tg3 *tp = netdev_priv(dev);
10240
10241         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10242                 wol->supported = WAKE_MAGIC;
10243         else
10244                 wol->supported = 0;
10245         wol->wolopts = 0;
10246         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10247                 wol->wolopts = WAKE_MAGIC;
10248         memset(&wol->sopass, 0, sizeof(wol->sopass));
10249 }
10250
10251 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10252 {
10253         struct tg3 *tp = netdev_priv(dev);
10254         struct device *dp = &tp->pdev->dev;
10255
10256         if (wol->wolopts & ~WAKE_MAGIC)
10257                 return -EINVAL;
10258         if ((wol->wolopts & WAKE_MAGIC) &&
10259             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10260                 return -EINVAL;
10261
10262         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10263
10264         spin_lock_bh(&tp->lock);
10265         if (device_may_wakeup(dp))
10266                 tg3_flag_set(tp, WOL_ENABLE);
10267         else
10268                 tg3_flag_clear(tp, WOL_ENABLE);
10269         spin_unlock_bh(&tp->lock);
10270
10271         return 0;
10272 }
10273
10274 static u32 tg3_get_msglevel(struct net_device *dev)
10275 {
10276         struct tg3 *tp = netdev_priv(dev);
10277         return tp->msg_enable;
10278 }
10279
10280 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10281 {
10282         struct tg3 *tp = netdev_priv(dev);
10283         tp->msg_enable = value;
10284 }
10285
10286 static int tg3_nway_reset(struct net_device *dev)
10287 {
10288         struct tg3 *tp = netdev_priv(dev);
10289         int r;
10290
10291         if (!netif_running(dev))
10292                 return -EAGAIN;
10293
10294         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10295                 return -EINVAL;
10296
10297         if (tg3_flag(tp, USE_PHYLIB)) {
10298                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10299                         return -EAGAIN;
10300                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10301         } else {
10302                 u32 bmcr;
10303
10304                 spin_lock_bh(&tp->lock);
10305                 r = -EINVAL;
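                /* The back-to-back BMCR reads below are long-standing in
                 * this driver; the first read presumably flushes a stale
                 * value so that the tested second read reflects the
                 * current register contents.
                 */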
10306                 tg3_readphy(tp, MII_BMCR, &bmcr);
10307                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10308                     ((bmcr & BMCR_ANENABLE) ||
10309                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10310                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10311                                                    BMCR_ANENABLE);
10312                         r = 0;
10313                 }
10314                 spin_unlock_bh(&tp->lock);
10315         }
10316
10317         return r;
10318 }
10319
10320 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10321 {
10322         struct tg3 *tp = netdev_priv(dev);
10323
10324         ering->rx_max_pending = tp->rx_std_ring_mask;
10325         ering->rx_mini_max_pending = 0;
10326         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10327                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10328         else
10329                 ering->rx_jumbo_max_pending = 0;
10330
10331         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10332
10333         ering->rx_pending = tp->rx_pending;
10334         ering->rx_mini_pending = 0;
10335         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10336                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10337         else
10338                 ering->rx_jumbo_pending = 0;
10339
10340         ering->tx_pending = tp->napi[0].tx_pending;
10341 }
10342
10343 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10344 {
10345         struct tg3 *tp = netdev_priv(dev);
10346         int i, irq_sync = 0, err = 0;
10347
10348         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10349             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10350             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10351             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10352             (tg3_flag(tp, TSO_BUG) &&
10353              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10354                 return -EINVAL;
10355
10356         if (netif_running(dev)) {
10357                 tg3_phy_stop(tp);
10358                 tg3_netif_stop(tp);
10359                 irq_sync = 1;
10360         }
10361
10362         tg3_full_lock(tp, irq_sync);
10363
10364         tp->rx_pending = ering->rx_pending;
10365
10366         if (tg3_flag(tp, MAX_RXPEND_64) &&
10367             tp->rx_pending > 63)
10368                 tp->rx_pending = 63;
10369         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10370
10371         for (i = 0; i < tp->irq_max; i++)
10372                 tp->napi[i].tx_pending = ering->tx_pending;
10373
10374         if (netif_running(dev)) {
10375                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10376                 err = tg3_restart_hw(tp, 1);
10377                 if (!err)
10378                         tg3_netif_start(tp);
10379         }
10380
10381         tg3_full_unlock(tp);
10382
10383         if (irq_sync && !err)
10384                 tg3_phy_start(tp);
10385
10386         return err;
10387 }
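
/* Note on the tx_pending bounds above: a maximally fragmented skb needs
 * one descriptor for the linear head plus one per page fragment, so the
 * ring must hold more than MAX_SKB_FRAGS entries.  The 3x margin on
 * TSO_BUG chips presumably leaves headroom for the TSO workaround path,
 * which can resubmit one oversized packet as several smaller ones.
 */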
10388
10389 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10390 {
10391         struct tg3 *tp = netdev_priv(dev);
10392
10393         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10394
10395         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10396                 epause->rx_pause = 1;
10397         else
10398                 epause->rx_pause = 0;
10399
10400         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10401                 epause->tx_pause = 1;
10402         else
10403                 epause->tx_pause = 0;
10404 }
10405
10406 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10407 {
10408         struct tg3 *tp = netdev_priv(dev);
10409         int err = 0;
10410
10411         if (tg3_flag(tp, USE_PHYLIB)) {
10412                 u32 newadv;
10413                 struct phy_device *phydev;
10414
10415                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10416
10417                 if (!(phydev->supported & SUPPORTED_Pause) ||
10418                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10419                      (epause->rx_pause != epause->tx_pause)))
10420                         return -EINVAL;
10421
10422                 tp->link_config.flowctrl = 0;
10423                 if (epause->rx_pause) {
10424                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10425
10426                         if (epause->tx_pause) {
10427                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10428                                 newadv = ADVERTISED_Pause;
10429                         } else
10430                                 newadv = ADVERTISED_Pause |
10431                                          ADVERTISED_Asym_Pause;
10432                 } else if (epause->tx_pause) {
10433                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10434                         newadv = ADVERTISED_Asym_Pause;
10435                 } else
10436                         newadv = 0;
10437
10438                 if (epause->autoneg)
10439                         tg3_flag_set(tp, PAUSE_AUTONEG);
10440                 else
10441                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10442
10443                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10444                         u32 oldadv = phydev->advertising &
10445                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10446                         if (oldadv != newadv) {
10447                                 phydev->advertising &=
10448                                         ~(ADVERTISED_Pause |
10449                                           ADVERTISED_Asym_Pause);
10450                                 phydev->advertising |= newadv;
10451                                 if (phydev->autoneg) {
10452                                         /*
10453                                          * Always renegotiate the link to
10454                                          * inform our link partner of our
10455                                          * flow control settings, even if the
10456                                          * flow control is forced.  Let
10457                                          * tg3_adjust_link() do the final
10458                                          * flow control setup.
10459                                          */
10460                                         return phy_start_aneg(phydev);
10461                                 }
10462                         }
10463
10464                         if (!epause->autoneg)
10465                                 tg3_setup_flow_control(tp, 0, 0);
10466                 } else {
10467                         tp->link_config.orig_advertising &=
10468                                         ~(ADVERTISED_Pause |
10469                                           ADVERTISED_Asym_Pause);
10470                         tp->link_config.orig_advertising |= newadv;
10471                 }
10472         } else {
10473                 int irq_sync = 0;
10474
10475                 if (netif_running(dev)) {
10476                         tg3_netif_stop(tp);
10477                         irq_sync = 1;
10478                 }
10479
10480                 tg3_full_lock(tp, irq_sync);
10481
10482                 if (epause->autoneg)
10483                         tg3_flag_set(tp, PAUSE_AUTONEG);
10484                 else
10485                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10486                 if (epause->rx_pause)
10487                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10488                 else
10489                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10490                 if (epause->tx_pause)
10491                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10492                 else
10493                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10494
10495                 if (netif_running(dev)) {
10496                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10497                         err = tg3_restart_hw(tp, 1);
10498                         if (!err)
10499                                 tg3_netif_start(tp);
10500                 }
10501
10502                 tg3_full_unlock(tp);
10503         }
10504
10505         return err;
10506 }
10507
10508 static int tg3_get_sset_count(struct net_device *dev, int sset)
10509 {
10510         switch (sset) {
10511         case ETH_SS_TEST:
10512                 return TG3_NUM_TEST;
10513         case ETH_SS_STATS:
10514                 return TG3_NUM_STATS;
10515         default:
10516                 return -EOPNOTSUPP;
10517         }
10518 }
10519
10520 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10521 {
10522         switch (stringset) {
10523         case ETH_SS_STATS:
10524                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10525                 break;
10526         case ETH_SS_TEST:
10527                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10528                 break;
10529         default:
10530                 WARN_ON(1);     /* we need a WARN() */
10531                 break;
10532         }
10533 }
10534
10535 static int tg3_set_phys_id(struct net_device *dev,
10536                             enum ethtool_phys_id_state state)
10537 {
10538         struct tg3 *tp = netdev_priv(dev);
10539
10540         if (!netif_running(tp->dev))
10541                 return -EAGAIN;
10542
10543         switch (state) {
10544         case ETHTOOL_ID_ACTIVE:
10545                 return 1;       /* cycle on/off once per second */
10546
10547         case ETHTOOL_ID_ON:
10548                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10549                      LED_CTRL_1000MBPS_ON |
10550                      LED_CTRL_100MBPS_ON |
10551                      LED_CTRL_10MBPS_ON |
10552                      LED_CTRL_TRAFFIC_OVERRIDE |
10553                      LED_CTRL_TRAFFIC_BLINK |
10554                      LED_CTRL_TRAFFIC_LED);
10555                 break;
10556
10557         case ETHTOOL_ID_OFF:
10558                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10559                      LED_CTRL_TRAFFIC_OVERRIDE);
10560                 break;
10561
10562         case ETHTOOL_ID_INACTIVE:
10563                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10564                 break;
10565         }
10566
10567         return 0;
10568 }
10569
10570 static void tg3_get_ethtool_stats(struct net_device *dev,
10571                                    struct ethtool_stats *estats, u64 *tmp_stats)
10572 {
10573         struct tg3 *tp = netdev_priv(dev);
10574         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10575 }
10576
10577 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10578 {
10579         int i;
10580         __be32 *buf;
10581         u32 offset = 0, len = 0;
10582         u32 magic, val;
10583
10584         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10585                 return NULL;
10586
10587         if (magic == TG3_EEPROM_MAGIC) {
10588                 for (offset = TG3_NVM_DIR_START;
10589                      offset < TG3_NVM_DIR_END;
10590                      offset += TG3_NVM_DIRENT_SIZE) {
10591                         if (tg3_nvram_read(tp, offset, &val))
10592                                 return NULL;
10593
10594                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10595                             TG3_NVM_DIRTYPE_EXTVPD)
10596                                 break;
10597                 }
10598
10599                 if (offset != TG3_NVM_DIR_END) {
10600                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10601                         if (tg3_nvram_read(tp, offset + 4, &offset))
10602                                 return NULL;
10603
10604                         offset = tg3_nvram_logical_addr(tp, offset);
10605                 }
10606         }
10607
10608         if (!offset || !len) {
10609                 offset = TG3_NVM_VPD_OFF;
10610                 len = TG3_NVM_VPD_LEN;
10611         }
10612
10613         buf = kmalloc(len, GFP_KERNEL);
10614         if (buf == NULL)
10615                 return NULL;
10616
10617         if (magic == TG3_EEPROM_MAGIC) {
10618                 for (i = 0; i < len; i += 4) {
10619                         /* The data is in little-endian format in NVRAM.
10620                          * Use the big-endian read routines to preserve
10621                          * the byte order as it exists in NVRAM.
10622                          */
10623                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10624                                 goto error;
10625                 }
10626         } else {
10627                 u8 *ptr;
10628                 ssize_t cnt;
10629                 unsigned int pos = 0;
10630
10631                 ptr = (u8 *)&buf[0];
10632                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10633                         cnt = pci_read_vpd(tp->pdev, pos,
10634                                            len - pos, ptr);
10635                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10636                                 cnt = 0;
10637                         else if (cnt < 0)
10638                                 goto error;
10639                 }
10640                 if (pos != len)
10641                         goto error;
10642         }
10643
10644         *vpdlen = len;
10645
10646         return buf;
10647
10648 error:
10649         kfree(buf);
10650         return NULL;
10651 }
10652
10653 #define NVRAM_TEST_SIZE 0x100
10654 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10655 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10656 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10657 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10658 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10659 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10660 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10661 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10662
10663 static int tg3_test_nvram(struct tg3 *tp)
10664 {
10665         u32 csum, magic, len;
10666         __be32 *buf;
10667         int i, j, k, err = 0, size;
10668
10669         if (tg3_flag(tp, NO_NVRAM))
10670                 return 0;
10671
10672         if (tg3_nvram_read(tp, 0, &magic) != 0)
10673                 return -EIO;
10674
10675         if (magic == TG3_EEPROM_MAGIC)
10676                 size = NVRAM_TEST_SIZE;
10677         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10678                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10679                     TG3_EEPROM_SB_FORMAT_1) {
10680                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10681                         case TG3_EEPROM_SB_REVISION_0:
10682                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10683                                 break;
10684                         case TG3_EEPROM_SB_REVISION_2:
10685                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10686                                 break;
10687                         case TG3_EEPROM_SB_REVISION_3:
10688                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10689                                 break;
10690                         case TG3_EEPROM_SB_REVISION_4:
10691                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10692                                 break;
10693                         case TG3_EEPROM_SB_REVISION_5:
10694                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10695                                 break;
10696                         case TG3_EEPROM_SB_REVISION_6:
10697                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10698                                 break;
10699                         default:
10700                                 return -EIO;
10701                         }
10702                 } else
10703                         return 0;
10704         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10705                 size = NVRAM_SELFBOOT_HW_SIZE;
10706         else
10707                 return -EIO;
10708
10709         buf = kmalloc(size, GFP_KERNEL);
10710         if (buf == NULL)
10711                 return -ENOMEM;
10712
10713         err = -EIO;
10714         for (i = 0, j = 0; i < size; i += 4, j++) {
10715                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10716                 if (err)
10717                         break;
10718         }
10719         if (i < size)
10720                 goto out;
10721
10722         /* Selfboot format */
10723         magic = be32_to_cpu(buf[0]);
10724         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10725             TG3_EEPROM_MAGIC_FW) {
10726                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10727
10728                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10729                     TG3_EEPROM_SB_REVISION_2) {
10730                         /* For rev 2, the csum doesn't include the MBA. */
10731                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10732                                 csum8 += buf8[i];
10733                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10734                                 csum8 += buf8[i];
10735                 } else {
10736                         for (i = 0; i < size; i++)
10737                                 csum8 += buf8[i];
10738                 }
10739
10740                 if (csum8 == 0) {
10741                         err = 0;
10742                         goto out;
10743                 }
10744
10745                 err = -EIO;
10746                 goto out;
10747         }
10748
10749         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10750             TG3_EEPROM_MAGIC_HW) {
10751                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10752                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10753                 u8 *buf8 = (u8 *) buf;
10754
10755                 /* Separate the parity bits and the data bytes.  */
10756                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10757                         if ((i == 0) || (i == 8)) {
10758                                 int l;
10759                                 u8 msk;
10760
10761                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10762                                         parity[k++] = buf8[i] & msk;
10763                                 i++;
10764                         } else if (i == 16) {
10765                                 int l;
10766                                 u8 msk;
10767
10768                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10769                                         parity[k++] = buf8[i] & msk;
10770                                 i++;
10771
10772                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10773                                         parity[k++] = buf8[i] & msk;
10774                                 i++;
10775                         }
10776                         data[j++] = buf8[i];
10777                 }
10778
10779                 err = -EIO;
10780                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10781                         u8 hw8 = hweight8(data[i]);
10782
10783                         if ((hw8 & 0x1) && parity[i])
10784                                 goto out;
10785                         else if (!(hw8 & 0x1) && !parity[i])
10786                                 goto out;
10787                 }
10788                 err = 0;
10789                 goto out;
10790         }
10791
10792         err = -EIO;
10793
10794         /* Bootstrap checksum at offset 0x10 */
10795         csum = calc_crc((unsigned char *) buf, 0x10);
10796         if (csum != le32_to_cpu(buf[0x10/4]))
10797                 goto out;
10798
10799         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10800         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10801         if (csum != le32_to_cpu(buf[0xfc/4]))
10802                 goto out;
10803
10804         kfree(buf);
10805
10806         buf = tg3_vpd_readblock(tp, &len);
10807         if (!buf)
10808                 return -ENOMEM;
10809
10810         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
10811         if (i > 0) {
10812                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10813                 if (j < 0)
10814                         goto out;
10815
10816                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
10817                         goto out;
10818
10819                 i += PCI_VPD_LRDT_TAG_SIZE;
10820                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10821                                               PCI_VPD_RO_KEYWORD_CHKSUM);
10822                 if (j > 0) {
10823                         u8 csum8 = 0;
10824
10825                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
10826
10827                         for (i = 0; i <= j; i++)
10828                                 csum8 += ((u8 *)buf)[i];
10829
10830                         if (csum8)
10831                                 goto out;
10832                 }
10833         }
10834
10835         err = 0;
10836
10837 out:
10838         kfree(buf);
10839         return err;
10840 }
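
/* The selfboot checks above rely on a simple invariant: the image
 * carries a checksum byte chosen so that the byte-wise sum over the
 * covered region is 0 mod 256 (csum8 == 0 on success).  The legacy
 * format instead stores CRC-32 values computed as in calc_crc() at
 * fixed offsets: 0x10 for the bootstrap area, 0xfc for the
 * manufacturing block.
 */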
10841
10842 #define TG3_SERDES_TIMEOUT_SEC  2
10843 #define TG3_COPPER_TIMEOUT_SEC  6
10844
10845 static int tg3_test_link(struct tg3 *tp)
10846 {
10847         int i, max;
10848
10849         if (!netif_running(tp->dev))
10850                 return -ENODEV;
10851
10852         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10853                 max = TG3_SERDES_TIMEOUT_SEC;
10854         else
10855                 max = TG3_COPPER_TIMEOUT_SEC;
10856
10857         for (i = 0; i < max; i++) {
10858                 if (netif_carrier_ok(tp->dev))
10859                         return 0;
10860
10861                 if (msleep_interruptible(1000))
10862                         break;
10863         }
10864
10865         return -EIO;
10866 }
10867
10868 /* Only test the commonly used registers */
10869 static int tg3_test_registers(struct tg3 *tp)
10870 {
10871         int i, is_5705, is_5750;
10872         u32 offset, read_mask, write_mask, val, save_val, read_val;
10873         static struct {
10874                 u16 offset;
10875                 u16 flags;
10876 #define TG3_FL_5705     0x1
10877 #define TG3_FL_NOT_5705 0x2
10878 #define TG3_FL_NOT_5788 0x4
10879 #define TG3_FL_NOT_5750 0x8
10880                 u32 read_mask;
10881                 u32 write_mask;
10882         } reg_tbl[] = {
10883                 /* MAC Control Registers */
10884                 { MAC_MODE, TG3_FL_NOT_5705,
10885                         0x00000000, 0x00ef6f8c },
10886                 { MAC_MODE, TG3_FL_5705,
10887                         0x00000000, 0x01ef6b8c },
10888                 { MAC_STATUS, TG3_FL_NOT_5705,
10889                         0x03800107, 0x00000000 },
10890                 { MAC_STATUS, TG3_FL_5705,
10891                         0x03800100, 0x00000000 },
10892                 { MAC_ADDR_0_HIGH, 0x0000,
10893                         0x00000000, 0x0000ffff },
10894                 { MAC_ADDR_0_LOW, 0x0000,
10895                         0x00000000, 0xffffffff },
10896                 { MAC_RX_MTU_SIZE, 0x0000,
10897                         0x00000000, 0x0000ffff },
10898                 { MAC_TX_MODE, 0x0000,
10899                         0x00000000, 0x00000070 },
10900                 { MAC_TX_LENGTHS, 0x0000,
10901                         0x00000000, 0x00003fff },
10902                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10903                         0x00000000, 0x000007fc },
10904                 { MAC_RX_MODE, TG3_FL_5705,
10905                         0x00000000, 0x000007dc },
10906                 { MAC_HASH_REG_0, 0x0000,
10907                         0x00000000, 0xffffffff },
10908                 { MAC_HASH_REG_1, 0x0000,
10909                         0x00000000, 0xffffffff },
10910                 { MAC_HASH_REG_2, 0x0000,
10911                         0x00000000, 0xffffffff },
10912                 { MAC_HASH_REG_3, 0x0000,
10913                         0x00000000, 0xffffffff },
10914
10915                 /* Receive Data and Receive BD Initiator Control Registers. */
10916                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10917                         0x00000000, 0xffffffff },
10918                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10919                         0x00000000, 0xffffffff },
10920                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10921                         0x00000000, 0x00000003 },
10922                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10923                         0x00000000, 0xffffffff },
10924                 { RCVDBDI_STD_BD+0, 0x0000,
10925                         0x00000000, 0xffffffff },
10926                 { RCVDBDI_STD_BD+4, 0x0000,
10927                         0x00000000, 0xffffffff },
10928                 { RCVDBDI_STD_BD+8, 0x0000,
10929                         0x00000000, 0xffff0002 },
10930                 { RCVDBDI_STD_BD+0xc, 0x0000,
10931                         0x00000000, 0xffffffff },
10932
10933                 /* Receive BD Initiator Control Registers. */
10934                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10935                         0x00000000, 0xffffffff },
10936                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10937                         0x00000000, 0x000003ff },
10938                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10939                         0x00000000, 0xffffffff },
10940
10941                 /* Host Coalescing Control Registers. */
10942                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10943                         0x00000000, 0x00000004 },
10944                 { HOSTCC_MODE, TG3_FL_5705,
10945                         0x00000000, 0x000000f6 },
10946                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10947                         0x00000000, 0xffffffff },
10948                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10949                         0x00000000, 0x000003ff },
10950                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10951                         0x00000000, 0xffffffff },
10952                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10953                         0x00000000, 0x000003ff },
10954                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10955                         0x00000000, 0xffffffff },
10956                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10957                         0x00000000, 0x000000ff },
10958                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10959                         0x00000000, 0xffffffff },
10960                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10961                         0x00000000, 0x000000ff },
10962                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10963                         0x00000000, 0xffffffff },
10964                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10965                         0x00000000, 0xffffffff },
10966                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10967                         0x00000000, 0xffffffff },
10968                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10969                         0x00000000, 0x000000ff },
10970                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10971                         0x00000000, 0xffffffff },
10972                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10973                         0x00000000, 0x000000ff },
10974                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10975                         0x00000000, 0xffffffff },
10976                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10977                         0x00000000, 0xffffffff },
10978                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10979                         0x00000000, 0xffffffff },
10980                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10981                         0x00000000, 0xffffffff },
10982                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10983                         0x00000000, 0xffffffff },
10984                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10985                         0xffffffff, 0x00000000 },
10986                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10987                         0xffffffff, 0x00000000 },
10988
10989                 /* Buffer Manager Control Registers. */
10990                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10991                         0x00000000, 0x007fff80 },
10992                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10993                         0x00000000, 0x007fffff },
10994                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10995                         0x00000000, 0x0000003f },
10996                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10997                         0x00000000, 0x000001ff },
10998                 { BUFMGR_MB_HIGH_WATER, 0x0000,
10999                         0x00000000, 0x000001ff },
11000                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11001                         0xffffffff, 0x00000000 },
11002                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11003                         0xffffffff, 0x00000000 },
11004
11005                 /* Mailbox Registers */
11006                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11007                         0x00000000, 0x000001ff },
11008                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11009                         0x00000000, 0x000001ff },
11010                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11011                         0x00000000, 0x000007ff },
11012                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11013                         0x00000000, 0x000001ff },
11014
11015                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11016         };
11017
11018         is_5705 = is_5750 = 0;
11019         if (tg3_flag(tp, 5705_PLUS)) {
11020                 is_5705 = 1;
11021                 if (tg3_flag(tp, 5750_PLUS))
11022                         is_5750 = 1;
11023         }
11024
11025         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11026                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11027                         continue;
11028
11029                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11030                         continue;
11031
11032                 if (tg3_flag(tp, IS_5788) &&
11033                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11034                         continue;
11035
11036                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11037                         continue;
11038
11039                 offset = (u32) reg_tbl[i].offset;
11040                 read_mask = reg_tbl[i].read_mask;
11041                 write_mask = reg_tbl[i].write_mask;
11042
11043                 /* Save the original register content */
11044                 save_val = tr32(offset);
11045
11046                 /* Determine the read-only value. */
11047                 read_val = save_val & read_mask;
11048
11049                 /* Write zero to the register, then make sure the read-only bits
11050                  * are not changed and the read/write bits are all zeros.
11051                  */
11052                 tw32(offset, 0);
11053
11054                 val = tr32(offset);
11055
11056                 /* Test the read-only and read/write bits. */
11057                 if (((val & read_mask) != read_val) || (val & write_mask))
11058                         goto out;
11059
11060                 /* Write ones to all the bits defined by RdMask and WrMask, then
11061                  * make sure the read-only bits are not changed and the
11062                  * read/write bits are all ones.
11063                  */
11064                 tw32(offset, read_mask | write_mask);
11065
11066                 val = tr32(offset);
11067
11068                 /* Test the read-only bits. */
11069                 if ((val & read_mask) != read_val)
11070                         goto out;
11071
11072                 /* Test the read/write bits. */
11073                 if ((val & write_mask) != write_mask)
11074                         goto out;
11075
11076                 tw32(offset, save_val);
11077         }
11078
11079         return 0;
11080
11081 out:
11082         if (netif_msg_hw(tp))
11083                 netdev_err(tp->dev,
11084                            "Register test failed at offset %x\n", offset);
11085         tw32(offset, save_val);
11086         return -EIO;
11087 }
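
/* Worked example of the masked test above: MAC_MODE on 5705-class parts
 * has read_mask 0x00000000 and write_mask 0x01ef6b8c, so writing zero
 * must read back zero in every tested bit, and writing the combined
 * masks must read back exactly 0x01ef6b8c, with no read-only bits
 * drifting in either case.
 */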
11088
11089 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11090 {
11091         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11092         int i;
11093         u32 j;
11094
11095         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11096                 for (j = 0; j < len; j += 4) {
11097                         u32 val;
11098
11099                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11100                         tg3_read_mem(tp, offset + j, &val);
11101                         if (val != test_pattern[i])
11102                                 return -EIO;
11103                 }
11104         }
11105         return 0;
11106 }
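
/* tg3_do_mem_test() sweeps each word of the region with all-zeros,
 * all-ones and an alternating pattern (0xaa55a55a), reading every word
 * back through the memory window.  A hypothetical call testing 2 KB of
 * internal SRAM at offset 0x4000 (one of the 5705 regions below) would
 * be:
 *
 *      err = tg3_do_mem_test(tp, 0x00004000, 0x00800);
 */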
11107
11108 static int tg3_test_memory(struct tg3 *tp)
11109 {
11110         static struct mem_entry {
11111                 u32 offset;
11112                 u32 len;
11113         } mem_tbl_570x[] = {
11114                 { 0x00000000, 0x00b50},
11115                 { 0x00002000, 0x1c000},
11116                 { 0xffffffff, 0x00000}
11117         }, mem_tbl_5705[] = {
11118                 { 0x00000100, 0x0000c},
11119                 { 0x00000200, 0x00008},
11120                 { 0x00004000, 0x00800},
11121                 { 0x00006000, 0x01000},
11122                 { 0x00008000, 0x02000},
11123                 { 0x00010000, 0x0e000},
11124                 { 0xffffffff, 0x00000}
11125         }, mem_tbl_5755[] = {
11126                 { 0x00000200, 0x00008},
11127                 { 0x00004000, 0x00800},
11128                 { 0x00006000, 0x00800},
11129                 { 0x00008000, 0x02000},
11130                 { 0x00010000, 0x0c000},
11131                 { 0xffffffff, 0x00000}
11132         }, mem_tbl_5906[] = {
11133                 { 0x00000200, 0x00008},
11134                 { 0x00004000, 0x00400},
11135                 { 0x00006000, 0x00400},
11136                 { 0x00008000, 0x01000},
11137                 { 0x00010000, 0x01000},
11138                 { 0xffffffff, 0x00000}
11139         }, mem_tbl_5717[] = {
11140                 { 0x00000200, 0x00008},
11141                 { 0x00010000, 0x0a000},
11142                 { 0x00020000, 0x13c00},
11143                 { 0xffffffff, 0x00000}
11144         }, mem_tbl_57765[] = {
11145                 { 0x00000200, 0x00008},
11146                 { 0x00004000, 0x00800},
11147                 { 0x00006000, 0x09800},
11148                 { 0x00010000, 0x0a000},
11149                 { 0xffffffff, 0x00000}
11150         };
11151         struct mem_entry *mem_tbl;
11152         int err = 0;
11153         int i;
11154
11155         if (tg3_flag(tp, 5717_PLUS))
11156                 mem_tbl = mem_tbl_5717;
11157         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11158                 mem_tbl = mem_tbl_57765;
11159         else if (tg3_flag(tp, 5755_PLUS))
11160                 mem_tbl = mem_tbl_5755;
11161         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11162                 mem_tbl = mem_tbl_5906;
11163         else if (tg3_flag(tp, 5705_PLUS))
11164                 mem_tbl = mem_tbl_5705;
11165         else
11166                 mem_tbl = mem_tbl_570x;
11167
11168         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11169                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11170                 if (err)
11171                         break;
11172         }
11173
11174         return err;
11175 }
11176
11177 #define TG3_MAC_LOOPBACK        0
11178 #define TG3_PHY_LOOPBACK        1
11179 #define TG3_TSO_LOOPBACK        2
11180
11181 #define TG3_TSO_MSS             500
11182
11183 #define TG3_TSO_IP_HDR_LEN      20
11184 #define TG3_TSO_TCP_HDR_LEN     20
11185 #define TG3_TSO_TCP_OPT_LEN     12
11186
11187 static const u8 tg3_tso_header[] = {
11188 0x08, 0x00,                       /* Ethertype: IPv4 */
11189 0x45, 0x00, 0x00, 0x00,           /* IP: ver 4, hlen 20, TOS 0; tot_len set at runtime */
11190 0x00, 0x00, 0x40, 0x00,           /* IP: id 0, DF set, frag offset 0 */
11191 0x40, 0x06, 0x00, 0x00,           /* IP: TTL 64, proto TCP, csum 0 */
11192 0x0a, 0x00, 0x00, 0x01,           /* IP: saddr 10.0.0.1 */
11193 0x0a, 0x00, 0x00, 0x02,           /* IP: daddr 10.0.0.2 */
11194 0x0d, 0x00, 0xe0, 0x00,           /* TCP: sport 0x0d00, dport 0xe000 */
11195 0x00, 0x00, 0x01, 0x00,           /* TCP: seq 0x100 */
11196 0x00, 0x00, 0x02, 0x00,           /* TCP: ack 0x200 */
11197 0x80, 0x10, 0x10, 0x00,           /* TCP: doff 8 (32 bytes), ACK; window 0x1000 */
11198 0x14, 0x09, 0x00, 0x00,           /* TCP: csum, urg ptr 0 */
11199 0x01, 0x01, 0x08, 0x0a,           /* TCP opts: NOP, NOP, timestamp (kind 8, len 10) */
11200 0x11, 0x11, 0x11, 0x11,           /* TCP opts: timestamp value */
11201 0x11, 0x11, 0x11, 0x11,           /* TCP opts: timestamp echo */
11202 };
11203
11204 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11205 {
11206         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11207         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11208         struct sk_buff *skb, *rx_skb;
11209         u8 *tx_data;
11210         dma_addr_t map;
11211         int num_pkts, tx_len, rx_len, i, err;
11212         struct tg3_rx_buffer_desc *desc;
11213         struct tg3_napi *tnapi, *rnapi;
11214         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11215
11216         tnapi = &tp->napi[0];
11217         rnapi = &tp->napi[0];
11218         if (tp->irq_cnt > 1) {
11219                 if (tg3_flag(tp, ENABLE_RSS))
11220                         rnapi = &tp->napi[1];
11221                 if (tg3_flag(tp, ENABLE_TSS))
11222                         tnapi = &tp->napi[1];
11223         }
11224         coal_now = tnapi->coal_now | rnapi->coal_now;
11225
11226         if (loopback_mode == TG3_MAC_LOOPBACK) {
11227                 /* HW errata - MAC loopback fails in some cases on 5780.
11228                  * Normal traffic and PHY loopback are not affected by
11229                  * this erratum.  Also, the MAC loopback test is
11230                  * deprecated for all newer ASIC revisions.
11231                  */
11232                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11233                     tg3_flag(tp, CPMU_PRESENT))
11234                         return 0;
11235
11236                 mac_mode = tp->mac_mode &
11237                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11238                 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11239                 if (!tg3_flag(tp, 5705_PLUS))
11240                         mac_mode |= MAC_MODE_LINK_POLARITY;
11241                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11242                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11243                 else
11244                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11245                 tw32(MAC_MODE, mac_mode);
11246         } else {
11247                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11248                         tg3_phy_fet_toggle_apd(tp, false);
11249                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11250                 } else
11251                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11252
11253                 tg3_phy_toggle_automdix(tp, 0);
11254
11255                 tg3_writephy(tp, MII_BMCR, val);
11256                 udelay(40);
11257
11258                 mac_mode = tp->mac_mode &
11259                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11260                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11261                         tg3_writephy(tp, MII_TG3_FET_PTEST,
11262                                      MII_TG3_FET_PTEST_FRC_TX_LINK |
11263                                      MII_TG3_FET_PTEST_FRC_TX_LOCK);
11264                         /* The write needs to be flushed for the AC131 */
11265                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11266                                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11267                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11268                 } else
11269                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11270
11271                 /* reset to prevent losing 1st rx packet intermittently */
11272                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11273                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11274                         udelay(10);
11275                         tw32_f(MAC_RX_MODE, tp->rx_mode);
11276                 }
11277                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11278                         u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11279                         if (masked_phy_id == TG3_PHY_ID_BCM5401)
11280                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11281                         else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11282                                 mac_mode |= MAC_MODE_LINK_POLARITY;
11283                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
11284                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11285                 }
11286                 tw32(MAC_MODE, mac_mode);
11287
11288                 /* Wait for link */
11289                 for (i = 0; i < 100; i++) {
11290                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11291                                 break;
11292                         mdelay(1);
11293                 }
11294         }
11295
11296         err = -EIO;
11297
11298         tx_len = pktsz;
11299         skb = netdev_alloc_skb(tp->dev, tx_len);
11300         if (!skb)
11301                 return -ENOMEM;
11302
11303         tx_data = skb_put(skb, tx_len);
11304         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
11305         memset(tx_data + ETH_ALEN, 0x0, 8);     /* zero src MAC + ethertype */
11306
11307         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11308
11309         if (loopback_mode == TG3_TSO_LOOPBACK) {
11310                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11311
11312                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11313                               TG3_TSO_TCP_OPT_LEN;
11314
11315                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11316                        sizeof(tg3_tso_header));
11317                 mss = TG3_TSO_MSS;
11318
11319                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11320                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
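                /* Worked example for the standard ETH_FRAME_LEN (1514-byte)
                 * test frame: val = 1514 - 12 - 54 = 1448 bytes of TSO
                 * payload, so the hardware is expected to re-segment it
                 * into DIV_ROUND_UP(1448, 500) = 3 packets.
                 */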
11321
11322                 /* Set the total length field in the IP header */
11323                 iph->tot_len = htons((u16)(mss + hdr_len));
11324
11325                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11326                               TXD_FLAG_CPU_POST_DMA);
11327
11328                 if (tg3_flag(tp, HW_TSO_1) ||
11329                     tg3_flag(tp, HW_TSO_2) ||
11330                     tg3_flag(tp, HW_TSO_3)) {
11331                         struct tcphdr *th;
11332                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11333                         th = (struct tcphdr *)&tx_data[val];
11334                         th->check = 0;
11335                 } else
11336                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11337
11338                 if (tg3_flag(tp, HW_TSO_3)) {
11339                         mss |= (hdr_len & 0xc) << 12;
11340                         if (hdr_len & 0x10)
11341                                 base_flags |= 0x00000010;
11342                         base_flags |= (hdr_len & 0x3e0) << 5;
11343                 } else if (tg3_flag(tp, HW_TSO_2))
11344                         mss |= hdr_len << 9;
11345                 else if (tg3_flag(tp, HW_TSO_1) ||
11346                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11347                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11348                 } else {
11349                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11350                 }
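                /* The blocks above scatter the precomputed header length
                 * into whatever spare bits each hardware generation
                 * provides: HW_TSO_3 splits hdr_len across the mss field
                 * and base_flags, HW_TSO_2 packs it into mss bits 9 and
                 * up, and the older parts encode only the TCP option
                 * length.  (Summary of the code above, not of any
                 * published register spec.)
                 */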
11351
11352                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11353         } else {
11354                 num_pkts = 1;
11355                 data_off = ETH_HLEN;
11356         }
11357
11358         for (i = data_off; i < tx_len; i++)
11359                 tx_data[i] = (u8) (i & 0xff);
11360
11361         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11362         if (pci_dma_mapping_error(tp->pdev, map)) {
11363                 dev_kfree_skb(skb);
11364                 return -EIO;
11365         }
11366
11367         val = tnapi->tx_prod;
11368         tnapi->tx_buffers[val].skb = skb;
11369         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11370
11371         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11372                rnapi->coal_now);
11373
11374         udelay(10);
11375
11376         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11377
11378         tg3_tx_set_bd(tnapi, tnapi->tx_prod, map, tx_len,
11379                       base_flags | TXD_FLAG_END, mss, 0);
11380
11381         tnapi->tx_prod++;
11382
11383         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11384         tr32_mailbox(tnapi->prodmbox);
11385
11386         udelay(10);
11387
11388         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11389         for (i = 0; i < 35; i++) {
11390                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11391                        coal_now);
11392
11393                 udelay(10);
11394
11395                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11396                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11397                 if ((tx_idx == tnapi->tx_prod) &&
11398                     (rx_idx == (rx_start_idx + num_pkts)))
11399                         break;
11400         }
11401
11402         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
11403         dev_kfree_skb(skb);
11404
11405         if (tx_idx != tnapi->tx_prod)
11406                 goto out;
11407
11408         if (rx_idx != rx_start_idx + num_pkts)
11409                 goto out;
11410
11411         val = data_off;
11412         while (rx_idx != rx_start_idx) {
11413                 desc = &rnapi->rx_rcb[rx_start_idx++];
11414                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11415                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11416
11417                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11418                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11419                         goto out;
11420
11421                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11422                          - ETH_FCS_LEN;
11423
11424                 if (loopback_mode != TG3_TSO_LOOPBACK) {
11425                         if (rx_len != tx_len)
11426                                 goto out;
11427
11428                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11429                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11430                                         goto out;
11431                         } else {
11432                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11433                                         goto out;
11434                         }
11435                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11436                            ((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) >>
11437                             RXD_TCPCSUM_SHIFT) != 0xffff) {
11438                         goto out;
11439                 }
11440
11441                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11442                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11443                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11444                                              mapping);
11445                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11446                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11447                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11448                                              mapping);
11449                 } else
11450                         goto out;
11451
11452                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11453                                             PCI_DMA_FROMDEVICE);
11454
11455                 for (i = data_off; i < rx_len; i++, val++) {
11456                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11457                                 goto out;
11458                 }
11459         }
11460
11461         err = 0;
11462
11463         /* tg3_free_rings will unmap and free the rx_skb */
11464 out:
11465         return err;
11466 }
11467
11468 #define TG3_STD_LOOPBACK_FAILED         1
11469 #define TG3_JMB_LOOPBACK_FAILED         2
11470 #define TG3_TSO_LOOPBACK_FAILED         4
11471
11472 #define TG3_MAC_LOOPBACK_SHIFT          0
11473 #define TG3_PHY_LOOPBACK_SHIFT          4
11474 #define TG3_LOOPBACK_FAILED             0x00000077
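
/* Each loopback flavor reports a 3-bit failure code (STD=1, JMB=2, TSO=4).
 * MAC loopback results land in bits 0-2 and PHY loopback results in bits
 * 4-6, so an all-tests failure is 0x7 | (0x7 << 4) == 0x00000077, i.e.
 * TG3_LOOPBACK_FAILED.
 */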
11475
11476 static int tg3_test_loopback(struct tg3 *tp)
11477 {
11478         int err = 0;
11479         u32 eee_cap, cpmuctrl = 0;
11480
11481         if (!netif_running(tp->dev))
11482                 return TG3_LOOPBACK_FAILED;
11483
11484         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11485         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11486
11487         err = tg3_reset_hw(tp, 1);
11488         if (err) {
11489                 err = TG3_LOOPBACK_FAILED;
11490                 goto done;
11491         }
11492
11493         if (tg3_flag(tp, ENABLE_RSS)) {
11494                 int i;
11495
11496                 /* Reroute all rx packets to the 1st queue */
11497                 for (i = MAC_RSS_INDIR_TBL_0;
11498                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11499                         tw32(i, 0x0);
11500         }
11501
11502         /* Turn off gphy autopowerdown. */
11503         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11504                 tg3_phy_toggle_apd(tp, false);
11505
11506         if (tg3_flag(tp, CPMU_PRESENT)) {
11507                 int i;
11508                 u32 status;
11509
11510                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11511
11512                 /* Wait for up to 40 microseconds to acquire lock. */
11513                 for (i = 0; i < 4; i++) {
11514                         status = tr32(TG3_CPMU_MUTEX_GNT);
11515                         if (status == CPMU_MUTEX_GNT_DRIVER)
11516                                 break;
11517                         udelay(10);
11518                 }
11519
11520                 if (status != CPMU_MUTEX_GNT_DRIVER) {
11521                         err = TG3_LOOPBACK_FAILED;
11522                         goto done;
11523                 }
11524
11525                 /* Turn off link-based power management. */
11526                 cpmuctrl = tr32(TG3_CPMU_CTRL);
11527                 tw32(TG3_CPMU_CTRL,
11528                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11529                                   CPMU_CTRL_LINK_AWARE_MODE));
11530         }
11531
11532         if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11533                 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11534
11535         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11536             tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11537                 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11538
11539         if (tg3_flag(tp, CPMU_PRESENT)) {
11540                 tw32(TG3_CPMU_CTRL, cpmuctrl);
11541
11542                 /* Release the mutex */
11543                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11544         }
11545
11546         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11547             !tg3_flag(tp, USE_PHYLIB)) {
11548                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11549                         err |= TG3_STD_LOOPBACK_FAILED <<
11550                                TG3_PHY_LOOPBACK_SHIFT;
11551                 if (tg3_flag(tp, TSO_CAPABLE) &&
11552                     tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11553                         err |= TG3_TSO_LOOPBACK_FAILED <<
11554                                TG3_PHY_LOOPBACK_SHIFT;
11555                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11556                     tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11557                         err |= TG3_JMB_LOOPBACK_FAILED <<
11558                                TG3_PHY_LOOPBACK_SHIFT;
11559         }
11560
11561         /* Re-enable gphy autopowerdown. */
11562         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11563                 tg3_phy_toggle_apd(tp, true);
11564
11565 done:
11566         tp->phy_flags |= eee_cap;
11567
11568         return err;
11569 }
11570
11571 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11572                           u64 *data)
11573 {
11574         struct tg3 *tp = netdev_priv(dev);
11575
11576         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11577             tg3_power_up(tp)) {
11578                 etest->flags |= ETH_TEST_FL_FAILED;
11579                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11580                 return;
11581         }
11582
11583         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11584
11585         if (tg3_test_nvram(tp) != 0) {
11586                 etest->flags |= ETH_TEST_FL_FAILED;
11587                 data[0] = 1;
11588         }
11589         if (tg3_test_link(tp) != 0) {
11590                 etest->flags |= ETH_TEST_FL_FAILED;
11591                 data[1] = 1;
11592         }
11593         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11594                 int err, err2 = 0, irq_sync = 0;
11595
11596                 if (netif_running(dev)) {
11597                         tg3_phy_stop(tp);
11598                         tg3_netif_stop(tp);
11599                         irq_sync = 1;
11600                 }
11601
11602                 tg3_full_lock(tp, irq_sync);
11603
11604                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11605                 err = tg3_nvram_lock(tp);
11606                 tg3_halt_cpu(tp, RX_CPU_BASE);
11607                 if (!tg3_flag(tp, 5705_PLUS))
11608                         tg3_halt_cpu(tp, TX_CPU_BASE);
11609                 if (!err)
11610                         tg3_nvram_unlock(tp);
11611
11612                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11613                         tg3_phy_reset(tp);
11614
11615                 if (tg3_test_registers(tp) != 0) {
11616                         etest->flags |= ETH_TEST_FL_FAILED;
11617                         data[2] = 1;
11618                 }
11619                 if (tg3_test_memory(tp) != 0) {
11620                         etest->flags |= ETH_TEST_FL_FAILED;
11621                         data[3] = 1;
11622                 }
11623                 if ((data[4] = tg3_test_loopback(tp)) != 0)
11624                         etest->flags |= ETH_TEST_FL_FAILED;
11625
11626                 tg3_full_unlock(tp);
11627
11628                 if (tg3_test_interrupt(tp) != 0) {
11629                         etest->flags |= ETH_TEST_FL_FAILED;
11630                         data[5] = 1;
11631                 }
11632
11633                 tg3_full_lock(tp, 0);
11634
11635                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11636                 if (netif_running(dev)) {
11637                         tg3_flag_set(tp, INIT_COMPLETE);
11638                         err2 = tg3_restart_hw(tp, 1);
11639                         if (!err2)
11640                                 tg3_netif_start(tp);
11641                 }
11642
11643                 tg3_full_unlock(tp);
11644
11645                 if (irq_sync && !err2)
11646                         tg3_phy_start(tp);
11647         }
11648         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11649                 tg3_power_down(tp);
11650
11651 }
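
/* The result slots filled in above map to the test names exported via
 * .get_strings: data[0] nvram, data[1] link, data[2] registers,
 * data[3] memory, data[4] loopback (a failure bitmap, see
 * tg3_test_loopback), data[5] interrupt.  The offline set is triggered
 * from userspace with "ethtool -t <iface> offline".
 */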
11652
11653 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11654 {
11655         struct mii_ioctl_data *data = if_mii(ifr);
11656         struct tg3 *tp = netdev_priv(dev);
11657         int err;
11658
11659         if (tg3_flag(tp, USE_PHYLIB)) {
11660                 struct phy_device *phydev;
11661                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11662                         return -EAGAIN;
11663                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11664                 return phy_mii_ioctl(phydev, ifr, cmd);
11665         }
11666
11667         switch (cmd) {
11668         case SIOCGMIIPHY:
11669                 data->phy_id = tp->phy_addr;
11670
11671                 /* fallthru */
11672         case SIOCGMIIREG: {
11673                 u32 mii_regval;
11674
11675                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11676                         break;                  /* We have no PHY */
11677
11678                 if (!netif_running(dev))
11679                         return -EAGAIN;
11680
11681                 spin_lock_bh(&tp->lock);
11682                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11683                 spin_unlock_bh(&tp->lock);
11684
11685                 data->val_out = mii_regval;
11686
11687                 return err;
11688         }
11689
11690         case SIOCSMIIREG:
11691                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11692                         break;                  /* We have no PHY */
11693
11694                 if (!netif_running(dev))
11695                         return -EAGAIN;
11696
11697                 spin_lock_bh(&tp->lock);
11698                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11699                 spin_unlock_bh(&tp->lock);
11700
11701                 return err;
11702
11703         default:
11704                 /* do nothing */
11705                 break;
11706         }
11707         return -EOPNOTSUPP;
11708 }
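
/* A minimal userspace sketch of driving the MII ioctls handled above,
 * assuming an interface named "eth0" (hypothetical) and bare-minimum
 * error handling.  SIOCGMIIPHY fills in the PHY address, after which
 * SIOCGMIIREG can read any of the 32 MII registers.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
        struct ifreq ifr;
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {        /* fills mii->phy_id */
                mii->reg_num = MII_BMSR;                /* basic mode status */
                if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
                        printf("PHY %d BMSR = 0x%04x\n",
                               mii->phy_id, mii->val_out);
        }

        close(fd);
        return 0;
}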
11709
11710 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11711 {
11712         struct tg3 *tp = netdev_priv(dev);
11713
11714         memcpy(ec, &tp->coal, sizeof(*ec));
11715         return 0;
11716 }
11717
11718 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11719 {
11720         struct tg3 *tp = netdev_priv(dev);
11721         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11722         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11723
11724         if (!tg3_flag(tp, 5705_PLUS)) {
11725                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11726                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11727                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11728                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11729         }
11730
11731         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11732             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11733             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11734             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11735             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11736             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11737             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11738             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11739             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11740             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11741                 return -EINVAL;
11742
11743         /* No rx interrupts will be generated if both are zero */
11744         if ((ec->rx_coalesce_usecs == 0) &&
11745             (ec->rx_max_coalesced_frames == 0))
11746                 return -EINVAL;
11747
11748         /* No tx interrupts will be generated if both are zero */
11749         if ((ec->tx_coalesce_usecs == 0) &&
11750             (ec->tx_max_coalesced_frames == 0))
11751                 return -EINVAL;
11752
11753         /* Only copy relevant parameters, ignore all others. */
11754         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11755         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11756         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11757         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11758         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11759         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11760         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11761         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11762         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11763
11764         if (netif_running(dev)) {
11765                 tg3_full_lock(tp, 0);
11766                 __tg3_set_coalesce(tp, &tp->coal);
11767                 tg3_full_unlock(tp);
11768         }
11769         return 0;
11770 }
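
/* From userspace these knobs correspond to, e.g.,
 * "ethtool -C eth0 rx-usecs 20 rx-frames 5".  Note the checks above
 * reject setting both rx-usecs and rx-frames (or both tx equivalents)
 * to zero, since that would disable the corresponding interrupts
 * entirely.
 */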
11771
11772 static const struct ethtool_ops tg3_ethtool_ops = {
11773         .get_settings           = tg3_get_settings,
11774         .set_settings           = tg3_set_settings,
11775         .get_drvinfo            = tg3_get_drvinfo,
11776         .get_regs_len           = tg3_get_regs_len,
11777         .get_regs               = tg3_get_regs,
11778         .get_wol                = tg3_get_wol,
11779         .set_wol                = tg3_set_wol,
11780         .get_msglevel           = tg3_get_msglevel,
11781         .set_msglevel           = tg3_set_msglevel,
11782         .nway_reset             = tg3_nway_reset,
11783         .get_link               = ethtool_op_get_link,
11784         .get_eeprom_len         = tg3_get_eeprom_len,
11785         .get_eeprom             = tg3_get_eeprom,
11786         .set_eeprom             = tg3_set_eeprom,
11787         .get_ringparam          = tg3_get_ringparam,
11788         .set_ringparam          = tg3_set_ringparam,
11789         .get_pauseparam         = tg3_get_pauseparam,
11790         .set_pauseparam         = tg3_set_pauseparam,
11791         .self_test              = tg3_self_test,
11792         .get_strings            = tg3_get_strings,
11793         .set_phys_id            = tg3_set_phys_id,
11794         .get_ethtool_stats      = tg3_get_ethtool_stats,
11795         .get_coalesce           = tg3_get_coalesce,
11796         .set_coalesce           = tg3_set_coalesce,
11797         .get_sset_count         = tg3_get_sset_count,
11798 };
11799
11800 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11801 {
11802         u32 cursize, val, magic;
11803
11804         tp->nvram_size = EEPROM_CHIP_SIZE;
11805
11806         if (tg3_nvram_read(tp, 0, &magic) != 0)
11807                 return;
11808
11809         if ((magic != TG3_EEPROM_MAGIC) &&
11810             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11811             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11812                 return;
11813
11814         /*
11815          * Size the chip by reading offsets at increasing powers of two.
11816          * When we encounter our validation signature, we know the addressing
11817          * has wrapped around, and thus have our chip size.
11818          */
11819         cursize = 0x10;
11820
11821         while (cursize < tp->nvram_size) {
11822                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11823                         return;
11824
11825                 if (val == magic)
11826                         break;
11827
11828                 cursize <<= 1;
11829         }
11830
11831         tp->nvram_size = cursize;
11832 }
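
/* Example: on a hypothetical 512-byte part, the reads at 0x10, 0x20, ...,
 * 0x100 return ordinary data (assuming none of it happens to equal the
 * magic), but the read at 0x200 wraps back to offset 0 and returns the
 * magic signature, so cursize (and hence nvram_size) ends up as 0x200.
 */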
11833
11834 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11835 {
11836         u32 val;
11837
11838         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11839                 return;
11840
11841         /* Selfboot format */
11842         if (val != TG3_EEPROM_MAGIC) {
11843                 tg3_get_eeprom_size(tp);
11844                 return;
11845         }
11846
11847         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11848                 if (val != 0) {
11849                         /* This is confusing.  We want to operate on the
11850                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11851                          * call will read from NVRAM and byteswap the data
11852                          * according to the byteswapping settings for all
11853                          * other register accesses.  This ensures the data we
11854                          * want will always reside in the lower 16-bits.
11855                          * However, the data in NVRAM is in LE format, which
11856                          * means the data from the NVRAM read will always be
11857                          * opposite the endianness of the CPU.  The 16-bit
11858                          * byteswap then brings the data to CPU endianness.
11859                          */
11860                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11861                         return;
11862                 }
11863         }
11864         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11865 }
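
/* Worked example of the swab16 above: if the 16-bit count read back in
 * the low half of val is 0x0002, swab16() yields 0x0200 = 512, so
 * nvram_size becomes 512 * 1024 bytes, i.e. TG3_NVRAM_SIZE_512KB.
 */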
11866
11867 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11868 {
11869         u32 nvcfg1;
11870
11871         nvcfg1 = tr32(NVRAM_CFG1);
11872         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11873                 tg3_flag_set(tp, FLASH);
11874         } else {
11875                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11876                 tw32(NVRAM_CFG1, nvcfg1);
11877         }
11878
11879         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11880             tg3_flag(tp, 5780_CLASS)) {
11881                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11882                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11883                         tp->nvram_jedecnum = JEDEC_ATMEL;
11884                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11885                         tg3_flag_set(tp, NVRAM_BUFFERED);
11886                         break;
11887                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11888                         tp->nvram_jedecnum = JEDEC_ATMEL;
11889                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11890                         break;
11891                 case FLASH_VENDOR_ATMEL_EEPROM:
11892                         tp->nvram_jedecnum = JEDEC_ATMEL;
11893                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11894                         tg3_flag_set(tp, NVRAM_BUFFERED);
11895                         break;
11896                 case FLASH_VENDOR_ST:
11897                         tp->nvram_jedecnum = JEDEC_ST;
11898                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11899                         tg3_flag_set(tp, NVRAM_BUFFERED);
11900                         break;
11901                 case FLASH_VENDOR_SAIFUN:
11902                         tp->nvram_jedecnum = JEDEC_SAIFUN;
11903                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11904                         break;
11905                 case FLASH_VENDOR_SST_SMALL:
11906                 case FLASH_VENDOR_SST_LARGE:
11907                         tp->nvram_jedecnum = JEDEC_SST;
11908                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11909                         break;
11910                 }
11911         } else {
11912                 tp->nvram_jedecnum = JEDEC_ATMEL;
11913                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11914                 tg3_flag_set(tp, NVRAM_BUFFERED);
11915         }
11916 }
11917
11918 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11919 {
11920         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11921         case FLASH_5752PAGE_SIZE_256:
11922                 tp->nvram_pagesize = 256;
11923                 break;
11924         case FLASH_5752PAGE_SIZE_512:
11925                 tp->nvram_pagesize = 512;
11926                 break;
11927         case FLASH_5752PAGE_SIZE_1K:
11928                 tp->nvram_pagesize = 1024;
11929                 break;
11930         case FLASH_5752PAGE_SIZE_2K:
11931                 tp->nvram_pagesize = 2048;
11932                 break;
11933         case FLASH_5752PAGE_SIZE_4K:
11934                 tp->nvram_pagesize = 4096;
11935                 break;
11936         case FLASH_5752PAGE_SIZE_264:
11937                 tp->nvram_pagesize = 264;
11938                 break;
11939         case FLASH_5752PAGE_SIZE_528:
11940                 tp->nvram_pagesize = 528;
11941                 break;
11942         }
11943 }
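
/* The 264- and 528-byte cases are Atmel DataFlash-style parts whose pages
 * are a power of two plus eight.  Several of the *_nvram_info callers
 * below key NO_NVRAM_ADDR_TRANS off exactly these two sizes, skipping the
 * page-address translation such parts need.
 */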
11944
11945 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11946 {
11947         u32 nvcfg1;
11948
11949         nvcfg1 = tr32(NVRAM_CFG1);
11950
11951         /* NVRAM protection for TPM */
11952         if (nvcfg1 & (1 << 27))
11953                 tg3_flag_set(tp, PROTECTED_NVRAM);
11954
11955         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11956         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11957         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11958                 tp->nvram_jedecnum = JEDEC_ATMEL;
11959                 tg3_flag_set(tp, NVRAM_BUFFERED);
11960                 break;
11961         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11962                 tp->nvram_jedecnum = JEDEC_ATMEL;
11963                 tg3_flag_set(tp, NVRAM_BUFFERED);
11964                 tg3_flag_set(tp, FLASH);
11965                 break;
11966         case FLASH_5752VENDOR_ST_M45PE10:
11967         case FLASH_5752VENDOR_ST_M45PE20:
11968         case FLASH_5752VENDOR_ST_M45PE40:
11969                 tp->nvram_jedecnum = JEDEC_ST;
11970                 tg3_flag_set(tp, NVRAM_BUFFERED);
11971                 tg3_flag_set(tp, FLASH);
11972                 break;
11973         }
11974
11975         if (tg3_flag(tp, FLASH)) {
11976                 tg3_nvram_get_pagesize(tp, nvcfg1);
11977         } else {
11978                 /* For eeprom, set pagesize to maximum eeprom size */
11979                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11980
11981                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11982                 tw32(NVRAM_CFG1, nvcfg1);
11983         }
11984 }
11985
11986 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11987 {
11988         u32 nvcfg1, protect = 0;
11989
11990         nvcfg1 = tr32(NVRAM_CFG1);
11991
11992         /* NVRAM protection for TPM */
11993         if (nvcfg1 & (1 << 27)) {
11994                 tg3_flag_set(tp, PROTECTED_NVRAM);
11995                 protect = 1;
11996         }
11997
11998         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11999         switch (nvcfg1) {
12000         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12001         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12002         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12003         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12004                 tp->nvram_jedecnum = JEDEC_ATMEL;
12005                 tg3_flag_set(tp, NVRAM_BUFFERED);
12006                 tg3_flag_set(tp, FLASH);
12007                 tp->nvram_pagesize = 264;
12008                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12009                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12010                         tp->nvram_size = (protect ? 0x3e200 :
12011                                           TG3_NVRAM_SIZE_512KB);
12012                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12013                         tp->nvram_size = (protect ? 0x1f200 :
12014                                           TG3_NVRAM_SIZE_256KB);
12015                 else
12016                         tp->nvram_size = (protect ? 0x1f200 :
12017                                           TG3_NVRAM_SIZE_128KB);
12018                 break;
12019         case FLASH_5752VENDOR_ST_M45PE10:
12020         case FLASH_5752VENDOR_ST_M45PE20:
12021         case FLASH_5752VENDOR_ST_M45PE40:
12022                 tp->nvram_jedecnum = JEDEC_ST;
12023                 tg3_flag_set(tp, NVRAM_BUFFERED);
12024                 tg3_flag_set(tp, FLASH);
12025                 tp->nvram_pagesize = 256;
12026                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12027                         tp->nvram_size = (protect ?
12028                                           TG3_NVRAM_SIZE_64KB :
12029                                           TG3_NVRAM_SIZE_128KB);
12030                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12031                         tp->nvram_size = (protect ?
12032                                           TG3_NVRAM_SIZE_64KB :
12033                                           TG3_NVRAM_SIZE_256KB);
12034                 else
12035                         tp->nvram_size = (protect ?
12036                                           TG3_NVRAM_SIZE_128KB :
12037                                           TG3_NVRAM_SIZE_512KB);
12038                 break;
12039         }
12040 }
12041
12042 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12043 {
12044         u32 nvcfg1;
12045
12046         nvcfg1 = tr32(NVRAM_CFG1);
12047
12048         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12049         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12050         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12051         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12052         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12053                 tp->nvram_jedecnum = JEDEC_ATMEL;
12054                 tg3_flag_set(tp, NVRAM_BUFFERED);
12055                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12056
12057                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12058                 tw32(NVRAM_CFG1, nvcfg1);
12059                 break;
12060         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12061         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12062         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12063         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12064                 tp->nvram_jedecnum = JEDEC_ATMEL;
12065                 tg3_flag_set(tp, NVRAM_BUFFERED);
12066                 tg3_flag_set(tp, FLASH);
12067                 tp->nvram_pagesize = 264;
12068                 break;
12069         case FLASH_5752VENDOR_ST_M45PE10:
12070         case FLASH_5752VENDOR_ST_M45PE20:
12071         case FLASH_5752VENDOR_ST_M45PE40:
12072                 tp->nvram_jedecnum = JEDEC_ST;
12073                 tg3_flag_set(tp, NVRAM_BUFFERED);
12074                 tg3_flag_set(tp, FLASH);
12075                 tp->nvram_pagesize = 256;
12076                 break;
12077         }
12078 }
12079
12080 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12081 {
12082         u32 nvcfg1, protect = 0;
12083
12084         nvcfg1 = tr32(NVRAM_CFG1);
12085
12086         /* NVRAM protection for TPM */
12087         if (nvcfg1 & (1 << 27)) {
12088                 tg3_flag_set(tp, PROTECTED_NVRAM);
12089                 protect = 1;
12090         }
12091
12092         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12093         switch (nvcfg1) {
12094         case FLASH_5761VENDOR_ATMEL_ADB021D:
12095         case FLASH_5761VENDOR_ATMEL_ADB041D:
12096         case FLASH_5761VENDOR_ATMEL_ADB081D:
12097         case FLASH_5761VENDOR_ATMEL_ADB161D:
12098         case FLASH_5761VENDOR_ATMEL_MDB021D:
12099         case FLASH_5761VENDOR_ATMEL_MDB041D:
12100         case FLASH_5761VENDOR_ATMEL_MDB081D:
12101         case FLASH_5761VENDOR_ATMEL_MDB161D:
12102                 tp->nvram_jedecnum = JEDEC_ATMEL;
12103                 tg3_flag_set(tp, NVRAM_BUFFERED);
12104                 tg3_flag_set(tp, FLASH);
12105                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12106                 tp->nvram_pagesize = 256;
12107                 break;
12108         case FLASH_5761VENDOR_ST_A_M45PE20:
12109         case FLASH_5761VENDOR_ST_A_M45PE40:
12110         case FLASH_5761VENDOR_ST_A_M45PE80:
12111         case FLASH_5761VENDOR_ST_A_M45PE16:
12112         case FLASH_5761VENDOR_ST_M_M45PE20:
12113         case FLASH_5761VENDOR_ST_M_M45PE40:
12114         case FLASH_5761VENDOR_ST_M_M45PE80:
12115         case FLASH_5761VENDOR_ST_M_M45PE16:
12116                 tp->nvram_jedecnum = JEDEC_ST;
12117                 tg3_flag_set(tp, NVRAM_BUFFERED);
12118                 tg3_flag_set(tp, FLASH);
12119                 tp->nvram_pagesize = 256;
12120                 break;
12121         }
12122
12123         if (protect) {
12124                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12125         } else {
12126                 switch (nvcfg1) {
12127                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12128                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12129                 case FLASH_5761VENDOR_ST_A_M45PE16:
12130                 case FLASH_5761VENDOR_ST_M_M45PE16:
12131                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12132                         break;
12133                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12134                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12135                 case FLASH_5761VENDOR_ST_A_M45PE80:
12136                 case FLASH_5761VENDOR_ST_M_M45PE80:
12137                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12138                         break;
12139                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12140                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12141                 case FLASH_5761VENDOR_ST_A_M45PE40:
12142                 case FLASH_5761VENDOR_ST_M_M45PE40:
12143                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12144                         break;
12145                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12146                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12147                 case FLASH_5761VENDOR_ST_A_M45PE20:
12148                 case FLASH_5761VENDOR_ST_M_M45PE20:
12149                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12150                         break;
12151                 }
12152         }
12153 }
12154
12155 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12156 {
12157         tp->nvram_jedecnum = JEDEC_ATMEL;
12158         tg3_flag_set(tp, NVRAM_BUFFERED);
12159         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12160 }
12161
12162 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12163 {
12164         u32 nvcfg1;
12165
12166         nvcfg1 = tr32(NVRAM_CFG1);
12167
12168         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12169         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12170         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12171                 tp->nvram_jedecnum = JEDEC_ATMEL;
12172                 tg3_flag_set(tp, NVRAM_BUFFERED);
12173                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12174
12175                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12176                 tw32(NVRAM_CFG1, nvcfg1);
12177                 return;
12178         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12179         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12180         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12181         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12182         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12183         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12184         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12185                 tp->nvram_jedecnum = JEDEC_ATMEL;
12186                 tg3_flag_set(tp, NVRAM_BUFFERED);
12187                 tg3_flag_set(tp, FLASH);
12188
12189                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12190                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12191                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12192                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12193                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12194                         break;
12195                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12196                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12197                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12198                         break;
12199                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12200                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12201                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12202                         break;
12203                 }
12204                 break;
12205         case FLASH_5752VENDOR_ST_M45PE10:
12206         case FLASH_5752VENDOR_ST_M45PE20:
12207         case FLASH_5752VENDOR_ST_M45PE40:
12208                 tp->nvram_jedecnum = JEDEC_ST;
12209                 tg3_flag_set(tp, NVRAM_BUFFERED);
12210                 tg3_flag_set(tp, FLASH);
12211
12212                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12213                 case FLASH_5752VENDOR_ST_M45PE10:
12214                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12215                         break;
12216                 case FLASH_5752VENDOR_ST_M45PE20:
12217                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12218                         break;
12219                 case FLASH_5752VENDOR_ST_M45PE40:
12220                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12221                         break;
12222                 }
12223                 break;
12224         default:
12225                 tg3_flag_set(tp, NO_NVRAM);
12226                 return;
12227         }
12228
12229         tg3_nvram_get_pagesize(tp, nvcfg1);
12230         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12231                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12232 }
12233
12235 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12236 {
12237         u32 nvcfg1;
12238
12239         nvcfg1 = tr32(NVRAM_CFG1);
12240
12241         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12242         case FLASH_5717VENDOR_ATMEL_EEPROM:
12243         case FLASH_5717VENDOR_MICRO_EEPROM:
12244                 tp->nvram_jedecnum = JEDEC_ATMEL;
12245                 tg3_flag_set(tp, NVRAM_BUFFERED);
12246                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12247
12248                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12249                 tw32(NVRAM_CFG1, nvcfg1);
12250                 return;
12251         case FLASH_5717VENDOR_ATMEL_MDB011D:
12252         case FLASH_5717VENDOR_ATMEL_ADB011B:
12253         case FLASH_5717VENDOR_ATMEL_ADB011D:
12254         case FLASH_5717VENDOR_ATMEL_MDB021D:
12255         case FLASH_5717VENDOR_ATMEL_ADB021B:
12256         case FLASH_5717VENDOR_ATMEL_ADB021D:
12257         case FLASH_5717VENDOR_ATMEL_45USPT:
12258                 tp->nvram_jedecnum = JEDEC_ATMEL;
12259                 tg3_flag_set(tp, NVRAM_BUFFERED);
12260                 tg3_flag_set(tp, FLASH);
12261
12262                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12263                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12264                         /* Detect size with tg3_nvram_get_size() */
12265                         break;
12266                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12267                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12268                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12269                         break;
12270                 default:
12271                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12272                         break;
12273                 }
12274                 break;
12275         case FLASH_5717VENDOR_ST_M_M25PE10:
12276         case FLASH_5717VENDOR_ST_A_M25PE10:
12277         case FLASH_5717VENDOR_ST_M_M45PE10:
12278         case FLASH_5717VENDOR_ST_A_M45PE10:
12279         case FLASH_5717VENDOR_ST_M_M25PE20:
12280         case FLASH_5717VENDOR_ST_A_M25PE20:
12281         case FLASH_5717VENDOR_ST_M_M45PE20:
12282         case FLASH_5717VENDOR_ST_A_M45PE20:
12283         case FLASH_5717VENDOR_ST_25USPT:
12284         case FLASH_5717VENDOR_ST_45USPT:
12285                 tp->nvram_jedecnum = JEDEC_ST;
12286                 tg3_flag_set(tp, NVRAM_BUFFERED);
12287                 tg3_flag_set(tp, FLASH);
12288
12289                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12290                 case FLASH_5717VENDOR_ST_M_M25PE20:
12291                 case FLASH_5717VENDOR_ST_M_M45PE20:
12292                         /* Detect size with tg3_nvram_get_size() */
12293                         break;
12294                 case FLASH_5717VENDOR_ST_A_M25PE20:
12295                 case FLASH_5717VENDOR_ST_A_M45PE20:
12296                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12297                         break;
12298                 default:
12299                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12300                         break;
12301                 }
12302                 break;
12303         default:
12304                 tg3_flag_set(tp, NO_NVRAM);
12305                 return;
12306         }
12307
12308         tg3_nvram_get_pagesize(tp, nvcfg1);
12309         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12310                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12311 }
12312
12313 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12314 {
12315         u32 nvcfg1, nvmpinstrp;
12316
12317         nvcfg1 = tr32(NVRAM_CFG1);
12318         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12319
12320         switch (nvmpinstrp) {
12321         case FLASH_5720_EEPROM_HD:
12322         case FLASH_5720_EEPROM_LD:
12323                 tp->nvram_jedecnum = JEDEC_ATMEL;
12324                 tg3_flag_set(tp, NVRAM_BUFFERED);
12325
12326                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12327                 tw32(NVRAM_CFG1, nvcfg1);
12328                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12329                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12330                 else
12331                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12332                 return;
12333         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12334         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12335         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12336         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12337         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12338         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12339         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12340         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12341         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12342         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12343         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12344         case FLASH_5720VENDOR_ATMEL_45USPT:
12345                 tp->nvram_jedecnum = JEDEC_ATMEL;
12346                 tg3_flag_set(tp, NVRAM_BUFFERED);
12347                 tg3_flag_set(tp, FLASH);
12348
12349                 switch (nvmpinstrp) {
12350                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12351                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12352                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12353                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12354                         break;
12355                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12356                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12357                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12358                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12359                         break;
12360                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12361                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12362                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12363                         break;
12364                 default:
12365                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12366                         break;
12367                 }
12368                 break;
12369         case FLASH_5720VENDOR_M_ST_M25PE10:
12370         case FLASH_5720VENDOR_M_ST_M45PE10:
12371         case FLASH_5720VENDOR_A_ST_M25PE10:
12372         case FLASH_5720VENDOR_A_ST_M45PE10:
12373         case FLASH_5720VENDOR_M_ST_M25PE20:
12374         case FLASH_5720VENDOR_M_ST_M45PE20:
12375         case FLASH_5720VENDOR_A_ST_M25PE20:
12376         case FLASH_5720VENDOR_A_ST_M45PE20:
12377         case FLASH_5720VENDOR_M_ST_M25PE40:
12378         case FLASH_5720VENDOR_M_ST_M45PE40:
12379         case FLASH_5720VENDOR_A_ST_M25PE40:
12380         case FLASH_5720VENDOR_A_ST_M45PE40:
12381         case FLASH_5720VENDOR_M_ST_M25PE80:
12382         case FLASH_5720VENDOR_M_ST_M45PE80:
12383         case FLASH_5720VENDOR_A_ST_M25PE80:
12384         case FLASH_5720VENDOR_A_ST_M45PE80:
12385         case FLASH_5720VENDOR_ST_25USPT:
12386         case FLASH_5720VENDOR_ST_45USPT:
12387                 tp->nvram_jedecnum = JEDEC_ST;
12388                 tg3_flag_set(tp, NVRAM_BUFFERED);
12389                 tg3_flag_set(tp, FLASH);
12390
12391                 switch (nvmpinstrp) {
12392                 case FLASH_5720VENDOR_M_ST_M25PE20:
12393                 case FLASH_5720VENDOR_M_ST_M45PE20:
12394                 case FLASH_5720VENDOR_A_ST_M25PE20:
12395                 case FLASH_5720VENDOR_A_ST_M45PE20:
12396                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12397                         break;
12398                 case FLASH_5720VENDOR_M_ST_M25PE40:
12399                 case FLASH_5720VENDOR_M_ST_M45PE40:
12400                 case FLASH_5720VENDOR_A_ST_M25PE40:
12401                 case FLASH_5720VENDOR_A_ST_M45PE40:
12402                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12403                         break;
12404                 case FLASH_5720VENDOR_M_ST_M25PE80:
12405                 case FLASH_5720VENDOR_M_ST_M45PE80:
12406                 case FLASH_5720VENDOR_A_ST_M25PE80:
12407                 case FLASH_5720VENDOR_A_ST_M45PE80:
12408                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12409                         break;
12410                 default:
12411                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12412                         break;
12413                 }
12414                 break;
12415         default:
12416                 tg3_flag_set(tp, NO_NVRAM);
12417                 return;
12418         }
12419
12420         tg3_nvram_get_pagesize(tp, nvcfg1);
12421         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12422                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12423 }
12424
12425 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12426 static void __devinit tg3_nvram_init(struct tg3 *tp)
12427 {
12428         tw32_f(GRC_EEPROM_ADDR,
12429              (EEPROM_ADDR_FSM_RESET |
12430               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12431                EEPROM_ADDR_CLKPERD_SHIFT)));
12432
12433         msleep(1);
12434
12435         /* Enable seeprom accesses. */
12436         tw32_f(GRC_LOCAL_CTRL,
12437              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12438         udelay(100);
12439
12440         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12441             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12442                 tg3_flag_set(tp, NVRAM);
12443
12444                 if (tg3_nvram_lock(tp)) {
12445                         netdev_warn(tp->dev,
12446                                     "Cannot get nvram lock, %s failed\n",
12447                                     __func__);
12448                         return;
12449                 }
12450                 tg3_enable_nvram_access(tp);
12451
12452                 tp->nvram_size = 0;
12453
12454                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12455                         tg3_get_5752_nvram_info(tp);
12456                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12457                         tg3_get_5755_nvram_info(tp);
12458                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12459                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12460                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12461                         tg3_get_5787_nvram_info(tp);
12462                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12463                         tg3_get_5761_nvram_info(tp);
12464                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12465                         tg3_get_5906_nvram_info(tp);
12466                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12467                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12468                         tg3_get_57780_nvram_info(tp);
12469                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12470                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12471                         tg3_get_5717_nvram_info(tp);
12472                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12473                         tg3_get_5720_nvram_info(tp);
12474                 else
12475                         tg3_get_nvram_info(tp);
12476
12477                 if (tp->nvram_size == 0)
12478                         tg3_get_nvram_size(tp);
12479
12480                 tg3_disable_nvram_access(tp);
12481                 tg3_nvram_unlock(tp);
12482
12483         } else {
12484                 tg3_flag_clear(tp, NVRAM);
12485                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12486
12487                 tg3_get_eeprom_size(tp);
12488         }
12489 }
12490
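/* Write a block to the legacy SEEPROM one 32-bit word at a time.  Each
 * word is pushed through GRC_EEPROM_DATA/GRC_EEPROM_ADDR, and we poll
 * for up to ~1 second for EEPROM_ADDR_COMPLETE before bailing out with
 * -EBUSY.
 */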
12491 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12492                                     u32 offset, u32 len, u8 *buf)
12493 {
12494         int i, j, rc = 0;
12495         u32 val;
12496
12497         for (i = 0; i < len; i += 4) {
12498                 u32 addr;
12499                 __be32 data;
12500
12501                 addr = offset + i;
12502
12503                 memcpy(&data, buf + i, 4);
12504
12505                 /*
12506                  * The SEEPROM interface expects the data to always be opposite
12507                  * the native endian format.  We accomplish this by reversing
12508                  * all the operations that would have been performed on the
12509                  * data from a call to tg3_nvram_read_be32().
12510                  */
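                /* Illustrative example: if buf holds the bytes 12 34 56 78,
                 * then data as a __be32 is 0x12345678 and
                 * swab32(be32_to_cpu(data)) yields 0x78563412, i.e. the
                 * word is handed to the controller byte-reversed.
                 */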
12511                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12512
12513                 val = tr32(GRC_EEPROM_ADDR);
12514                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12515
12516                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12517                         EEPROM_ADDR_READ);
12518                 tw32(GRC_EEPROM_ADDR, val |
12519                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12520                         (addr & EEPROM_ADDR_ADDR_MASK) |
12521                         EEPROM_ADDR_START |
12522                         EEPROM_ADDR_WRITE);
12523
12524                 for (j = 0; j < 1000; j++) {
12525                         val = tr32(GRC_EEPROM_ADDR);
12526
12527                         if (val & EEPROM_ADDR_COMPLETE)
12528                                 break;
12529                         msleep(1);
12530                 }
12531                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12532                         rc = -EBUSY;
12533                         break;
12534                 }
12535         }
12536
12537         return rc;
12538 }
12539
12540 /* offset and length are dword aligned */
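/* Unbuffered flash parts can only be written a full page at a time:
 * the loop below reads back the target page, merges the caller's data
 * into it, issues a write-enable plus page erase, then re-enables
 * writes and rewrites the page one 32-bit word at a time with
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST framing.
 */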
12541 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12542                 u8 *buf)
12543 {
12544         int ret = 0;
12545         u32 pagesize = tp->nvram_pagesize;
12546         u32 pagemask = pagesize - 1;
12547         u32 nvram_cmd;
12548         u8 *tmp;
12549
12550         tmp = kmalloc(pagesize, GFP_KERNEL);
12551         if (tmp == NULL)
12552                 return -ENOMEM;
12553
12554         while (len) {
12555                 int j;
12556                 u32 phy_addr, page_off, size;
12557
12558                 phy_addr = offset & ~pagemask;
12559
12560                 for (j = 0; j < pagesize; j += 4) {
12561                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12562                                                   (__be32 *) (tmp + j));
12563                         if (ret)
12564                                 break;
12565                 }
12566                 if (ret)
12567                         break;
12568
12569                 page_off = offset & pagemask;
12570                 size = pagesize;
12571                 if (len < size)
12572                         size = len;
12573
12574                 len -= size;
12575
12576                 memcpy(tmp + page_off, buf, size);
12577
12578                 offset = offset + (pagesize - page_off);
12579
12580                 tg3_enable_nvram_access(tp);
12581
12582                 /*
12583                  * Before we can erase the flash page, we need
12584                  * to issue a special "write enable" command.
12585                  */
12586                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12587
12588                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12589                         break;
12590
12591                 /* Erase the target page */
12592                 tw32(NVRAM_ADDR, phy_addr);
12593
12594                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12595                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12596
12597                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12598                         break;
12599
12600                 /* Issue another write enable to start the write. */
12601                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12602
12603                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12604                         break;
12605
12606                 for (j = 0; j < pagesize; j += 4) {
12607                         __be32 data;
12608
12609                         data = *((__be32 *) (tmp + j));
12610
12611                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12612
12613                         tw32(NVRAM_ADDR, phy_addr + j);
12614
12615                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12616                                 NVRAM_CMD_WR;
12617
12618                         if (j == 0)
12619                                 nvram_cmd |= NVRAM_CMD_FIRST;
12620                         else if (j == (pagesize - 4))
12621                                 nvram_cmd |= NVRAM_CMD_LAST;
12622
12623                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12624                                 break;
12625                 }
12626                 if (ret)
12627                         break;
12628         }
12629
12630         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12631         tg3_nvram_exec_cmd(tp, nvram_cmd);
12632
12633         kfree(tmp);
12634
12635         return ret;
12636 }
12637
12638 /* offset and length are dword aligned */
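/* Buffered parts do their own page handling, so each 32-bit word is
 * written directly, with NVRAM_CMD_FIRST/NVRAM_CMD_LAST framing at
 * page boundaries and, for ST parts on chips other than the 5752 and
 * 5755+, an explicit write-enable at the start of each page.
 */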
12639 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12640                 u8 *buf)
12641 {
12642         int i, ret = 0;
12643
12644         for (i = 0; i < len; i += 4, offset += 4) {
12645                 u32 page_off, phy_addr, nvram_cmd;
12646                 __be32 data;
12647
12648                 memcpy(&data, buf + i, 4);
12649                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12650
12651                 page_off = offset % tp->nvram_pagesize;
12652
12653                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12654
12655                 tw32(NVRAM_ADDR, phy_addr);
12656
12657                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12658
12659                 if (page_off == 0 || i == 0)
12660                         nvram_cmd |= NVRAM_CMD_FIRST;
12661                 if (page_off == (tp->nvram_pagesize - 4))
12662                         nvram_cmd |= NVRAM_CMD_LAST;
12663
12664                 if (i == (len - 4))
12665                         nvram_cmd |= NVRAM_CMD_LAST;
12666
12667                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12668                     !tg3_flag(tp, 5755_PLUS) &&
12669                     (tp->nvram_jedecnum == JEDEC_ST) &&
12670                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12671                         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_WREN |
12672                                                  NVRAM_CMD_GO | NVRAM_CMD_DONE);
12673                         if (ret)
12674                                 break;
12677                 }
12678                 if (!tg3_flag(tp, FLASH)) {
12679                         /* We always do complete word writes to eeprom. */
12680                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12681                 }
12682
12683                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12684                         break;
12685         }
12686         return ret;
12687 }
12688
12689 /* offset and length are dword aligned */
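/* Top-level NVRAM write path: momentarily deasserts the write-protect
 * GPIO where EEPROM_WRITE_PROT is set, takes the NVRAM lock, enables
 * writes via GRC_MODE_NVRAM_WR_ENABLE, and dispatches to the buffered
 * or unbuffered writer depending on the detected part.  A hypothetical
 * caller, for illustration only (offset and len must be dword aligned):
 *
 *	__be32 val = cpu_to_be32(0xdeadbeef);
 *	err = tg3_nvram_write_block(tp, 0x100, 4, (u8 *)&val);
 */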
12690 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12691 {
12692         int ret;
12693
12694         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12695                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12696                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12697                 udelay(40);
12698         }
12699
12700         if (!tg3_flag(tp, NVRAM)) {
12701                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12702         } else {
12703                 u32 grc_mode;
12704
12705                 ret = tg3_nvram_lock(tp);
12706                 if (ret)
12707                         return ret;
12708
12709                 tg3_enable_nvram_access(tp);
12710                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12711                         tw32(NVRAM_WRITE1, 0x406);
12712
12713                 grc_mode = tr32(GRC_MODE);
12714                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12715
12716                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12717                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12718                                 buf);
12719                 } else {
12720                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12721                                 buf);
12722                 }
12723
12724                 grc_mode = tr32(GRC_MODE);
12725                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12726
12727                 tg3_disable_nvram_access(tp);
12728                 tg3_nvram_unlock(tp);
12729         }
12730
12731         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12732                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12733                 udelay(40);
12734         }
12735
12736         return ret;
12737 }
12738
12739 struct subsys_tbl_ent {
12740         u16 subsys_vendor, subsys_devid;
12741         u32 phy_id;
12742 };
12743
12744 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12745         /* Broadcom boards. */
12746         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12747           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12748         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12749           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12750         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12751           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12752         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12753           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12754         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12755           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12756         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12757           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12758         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12759           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12760         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12761           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12762         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12763           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12764         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12765           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12766         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12767           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12768
12769         /* 3com boards. */
12770         { TG3PCI_SUBVENDOR_ID_3COM,
12771           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12772         { TG3PCI_SUBVENDOR_ID_3COM,
12773           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12774         { TG3PCI_SUBVENDOR_ID_3COM,
12775           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12776         { TG3PCI_SUBVENDOR_ID_3COM,
12777           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12778         { TG3PCI_SUBVENDOR_ID_3COM,
12779           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12780
12781         /* DELL boards. */
12782         { TG3PCI_SUBVENDOR_ID_DELL,
12783           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12784         { TG3PCI_SUBVENDOR_ID_DELL,
12785           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12786         { TG3PCI_SUBVENDOR_ID_DELL,
12787           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12788         { TG3PCI_SUBVENDOR_ID_DELL,
12789           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12790
12791         /* Compaq boards. */
12792         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12793           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12794         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12795           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12796         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12797           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12798         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12799           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12800         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12801           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12802
12803         /* IBM boards. */
12804         { TG3PCI_SUBVENDOR_ID_IBM,
12805           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12806 };
12807
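/* Map a PCI subsystem vendor/device pair to a known PHY ID for boards
 * whose NVRAM carries no usable PHY information.
 */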
12808 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12809 {
12810         int i;
12811
12812         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12813                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12814                      tp->pdev->subsystem_vendor) &&
12815                     (subsys_id_to_phy_id[i].subsys_devid ==
12816                      tp->pdev->subsystem_device))
12817                         return &subsys_id_to_phy_id[i];
12818         }
12819         return NULL;
12820 }
12821
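/* Pull the board configuration (PHY ID, LED mode, WOL/ASF/APE enables,
 * RGMII strapping) out of NIC SRAM, which the bootcode populates from
 * NVRAM.  Safe defaults are assumed when the SRAM signature is absent.
 */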
12822 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12823 {
12824         u32 val;
12825
12826         tp->phy_id = TG3_PHY_ID_INVALID;
12827         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12828
12829         /* Assume an onboard device and WOL capable by default.  */
12830         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12831         tg3_flag_set(tp, WOL_CAP);
12832
12833         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12834                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12835                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12836                         tg3_flag_set(tp, IS_NIC);
12837                 }
12838                 val = tr32(VCPU_CFGSHDW);
12839                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12840                         tg3_flag_set(tp, ASPM_WORKAROUND);
12841                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12842                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12843                         tg3_flag_set(tp, WOL_ENABLE);
12844                         device_set_wakeup_enable(&tp->pdev->dev, true);
12845                 }
12846                 goto done;
12847         }
12848
12849         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12850         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12851                 u32 nic_cfg, led_cfg;
12852                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12853                 int eeprom_phy_serdes = 0;
12854
12855                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12856                 tp->nic_sram_data_cfg = nic_cfg;
12857
12858                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12859                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12860                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12861                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12862                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12863                     (ver > 0) && (ver < 0x100))
12864                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12865
12866                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12867                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12868
12869                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12870                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12871                         eeprom_phy_serdes = 1;
12872
12873                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12874                 if (nic_phy_id != 0) {
12875                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12876                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12877
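                        /* Repack the two SRAM halves into the same layout
                         * that tg3_phy_probe() builds from MII_PHYSID1/2:
                         * OUI bits above bit 10, model/revision below.
                         */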
12878                         eeprom_phy_id  = (id1 >> 16) << 10;
12879                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
12880                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
12881                 } else
12882                         eeprom_phy_id = 0;
12883
12884                 tp->phy_id = eeprom_phy_id;
12885                 if (eeprom_phy_serdes) {
12886                         if (!tg3_flag(tp, 5705_PLUS))
12887                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12888                         else
12889                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12890                 }
12891
12892                 if (tg3_flag(tp, 5750_PLUS))
12893                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12894                                     SHASTA_EXT_LED_MODE_MASK);
12895                 else
12896                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12897
12898                 switch (led_cfg) {
12899                 default:
12900                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12901                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12902                         break;
12903
12904                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12905                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12906                         break;
12907
12908                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12909                         tp->led_ctrl = LED_CTRL_MODE_MAC;
12910
12911                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12912                          * read on some older 5700/5701 bootcode.
12913                          */
12914                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12915                             ASIC_REV_5700 ||
12916                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
12917                             ASIC_REV_5701)
12918                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12919
12920                         break;
12921
12922                 case SHASTA_EXT_LED_SHARED:
12923                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
12924                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12925                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12926                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12927                                                  LED_CTRL_MODE_PHY_2);
12928                         break;
12929
12930                 case SHASTA_EXT_LED_MAC:
12931                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12932                         break;
12933
12934                 case SHASTA_EXT_LED_COMBO:
12935                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
12936                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12937                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12938                                                  LED_CTRL_MODE_PHY_2);
12939                         break;
12940
12941                 }
12942
12943                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12944                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12945                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12946                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12947
12948                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12949                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12950
12951                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12952                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12953                         if ((tp->pdev->subsystem_vendor ==
12954                              PCI_VENDOR_ID_ARIMA) &&
12955                             (tp->pdev->subsystem_device == 0x205a ||
12956                              tp->pdev->subsystem_device == 0x2063))
12957                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12958                 } else {
12959                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12960                         tg3_flag_set(tp, IS_NIC);
12961                 }
12962
12963                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12964                         tg3_flag_set(tp, ENABLE_ASF);
12965                         if (tg3_flag(tp, 5750_PLUS))
12966                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12967                 }
12968
12969                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12970                     tg3_flag(tp, 5750_PLUS))
12971                         tg3_flag_set(tp, ENABLE_APE);
12972
12973                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12974                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12975                         tg3_flag_clear(tp, WOL_CAP);
12976
12977                 if (tg3_flag(tp, WOL_CAP) &&
12978                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12979                         tg3_flag_set(tp, WOL_ENABLE);
12980                         device_set_wakeup_enable(&tp->pdev->dev, true);
12981                 }
12982
12983                 if (cfg2 & (1 << 17))
12984                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12985
12986                 /* SerDes signal pre-emphasis in register 0x590 is set by
12987                  * the bootcode if bit 18 is set. */
12988                 if (cfg2 & (1 << 18))
12989                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12990
12991                 if ((tg3_flag(tp, 57765_PLUS) ||
12992                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12993                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12994                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12995                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12996
12997                 if (tg3_flag(tp, PCI_EXPRESS) &&
12998                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12999                     !tg3_flag(tp, 57765_PLUS)) {
13000                         u32 cfg3;
13001
13002                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13003                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13004                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13005                 }
13006
13007                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13008                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13009                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13010                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13011                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13012                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13013         }
13014 done:
13015         if (tg3_flag(tp, WOL_CAP))
13016                 device_set_wakeup_enable(&tp->pdev->dev,
13017                                          tg3_flag(tp, WOL_ENABLE));
13018         else
13019                 device_set_wakeup_capable(&tp->pdev->dev, false);
13020 }
13021
13022 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13023 {
13024         int i;
13025         u32 val;
13026
13027         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13028         tw32(OTP_CTRL, cmd);
13029
13030         /* Wait for up to 1 ms for command to execute. */
13031         for (i = 0; i < 100; i++) {
13032                 val = tr32(OTP_STATUS);
13033                 if (val & OTP_STATUS_CMD_DONE)
13034                         break;
13035                 udelay(10);
13036         }
13037
13038         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13039 }
13040
13041 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13042  * configuration is a 32-bit value that straddles the alignment boundary.
13043  * We do two 32-bit reads and then shift and merge the results.
13044  */
13045 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13046 {
13047         u32 bhalf_otp, thalf_otp;
13048
13049         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13050
13051         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13052                 return 0;
13053
13054         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13055
13056         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13057                 return 0;
13058
13059         thalf_otp = tr32(OTP_READ_DATA);
13060
13061         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13062
13063         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13064                 return 0;
13065
13066         bhalf_otp = tr32(OTP_READ_DATA);
13067
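        /* The MAGIC1 read supplies the top half of the config in its low
         * 16 bits, the MAGIC2 read the bottom half in its high 16 bits.
         */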
13068         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13069 }
13070
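/* Seed the link configuration with everything the PHY can advertise:
 * autoneg plus pause, the gigabit rates unless the PHY is 10/100-only,
 * and the 10/100 copper rates unless this is a SerDes device.
 */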
13071 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13072 {
13073         u32 adv = ADVERTISED_Autoneg |
13074                   ADVERTISED_Pause;
13075
13076         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13077                 adv |= ADVERTISED_1000baseT_Half |
13078                        ADVERTISED_1000baseT_Full;
13079
13080         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13081                 adv |= ADVERTISED_100baseT_Half |
13082                        ADVERTISED_100baseT_Full |
13083                        ADVERTISED_10baseT_Half |
13084                        ADVERTISED_10baseT_Full |
13085                        ADVERTISED_TP;
13086         else
13087                 adv |= ADVERTISED_FIBRE;
13088
13089         tp->link_config.advertising = adv;
13090         tp->link_config.speed = SPEED_INVALID;
13091         tp->link_config.duplex = DUPLEX_INVALID;
13092         tp->link_config.autoneg = AUTONEG_ENABLE;
13093         tp->link_config.active_speed = SPEED_INVALID;
13094         tp->link_config.active_duplex = DUPLEX_INVALID;
13095         tp->link_config.orig_speed = SPEED_INVALID;
13096         tp->link_config.orig_duplex = DUPLEX_INVALID;
13097         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13098 }
13099
13100 static int __devinit tg3_phy_probe(struct tg3 *tp)
13101 {
13102         u32 hw_phy_id_1, hw_phy_id_2;
13103         u32 hw_phy_id, hw_phy_id_masked;
13104         int err;
13105
13106         /* flow control autonegotiation is default behavior */
13107         tg3_flag_set(tp, PAUSE_AUTONEG);
13108         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13109
13110         if (tg3_flag(tp, USE_PHYLIB))
13111                 return tg3_phy_init(tp);
13112
13113         /* Reading the PHY ID register can conflict with ASF
13114          * firmware access to the PHY hardware.
13115          */
13116         err = 0;
13117         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13118                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13119         } else {
13120                 /* Now read the physical PHY_ID from the chip and verify
13121                  * that it is sane.  If it doesn't look good, we fall back
13122                  * to the PHY_ID found in the eeprom area and, failing
13123                  * that, the hard-coded subsystem device table.
13124                  */
13125                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13126                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13127
13128                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13129                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13130                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13131
13132                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13133         }
13134
13135         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13136                 tp->phy_id = hw_phy_id;
13137                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13138                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13139                 else
13140                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13141         } else {
13142                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13143                         /* Do nothing, phy ID already set up in
13144                          * tg3_get_eeprom_hw_cfg().
13145                          */
13146                 } else {
13147                         struct subsys_tbl_ent *p;
13148
13149                         /* No eeprom signature?  Try the hardcoded
13150                          * subsys device table.
13151                          */
13152                         p = tg3_lookup_by_subsys(tp);
13153                         if (!p)
13154                                 return -ENODEV;
13155
13156                         tp->phy_id = p->phy_id;
13157                         if (!tp->phy_id ||
13158                             tp->phy_id == TG3_PHY_ID_BCM8002)
13159                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13160                 }
13161         }
13162
13163         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13164             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13165              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13166              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13167               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13168              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13169               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13170                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13171
13172         tg3_phy_init_link_config(tp);
13173
13174         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13175             !tg3_flag(tp, ENABLE_APE) &&
13176             !tg3_flag(tp, ENABLE_ASF)) {
13177                 u32 bmsr, mask;
13178
13179                 tg3_readphy(tp, MII_BMSR, &bmsr);
13180                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13181                     (bmsr & BMSR_LSTATUS))
13182                         goto skip_phy_reset;
13183
13184                 err = tg3_phy_reset(tp);
13185                 if (err)
13186                         return err;
13187
13188                 tg3_phy_set_wirespeed(tp);
13189
13190                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13191                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13192                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13193                 if (!tg3_copper_is_advertising_all(tp, mask)) {
13194                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13195                                             tp->link_config.flowctrl);
13196
13197                         tg3_writephy(tp, MII_BMCR,
13198                                      BMCR_ANENABLE | BMCR_ANRESTART);
13199                 }
13200         }
13201
13202 skip_phy_reset:
13203         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13204                 err = tg3_init_5401phy_dsp(tp);
13205                 if (err)
13206                         return err;
13207
13208                 err = tg3_init_5401phy_dsp(tp);
13209         }
13210
13211         return err;
13212 }
13213
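/* Parse the read-only section of the PCI VPD: pick up the vendor
 * firmware string on Dell boards (MFR_ID "1028", Dell's PCI vendor ID),
 * then the board part number, falling back to a name derived from the
 * PCI device ID when no usable VPD is present.
 */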
13214 static void __devinit tg3_read_vpd(struct tg3 *tp)
13215 {
13216         u8 *vpd_data;
13217         unsigned int block_end, rosize, len;
13218         u32 vpdlen;
13219         int j, i = 0;
13220
13221         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13222         if (!vpd_data)
13223                 goto out_no_vpd;
13224
13225         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13226         if (i < 0)
13227                 goto out_not_found;
13228
13229         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13230         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13231         i += PCI_VPD_LRDT_TAG_SIZE;
13232
13233         if (block_end > vpdlen)
13234                 goto out_not_found;
13235
13236         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13237                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13238         if (j > 0) {
13239                 len = pci_vpd_info_field_size(&vpd_data[j]);
13240
13241                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13242                 if (j + len > block_end || len != 4 ||
13243                     memcmp(&vpd_data[j], "1028", 4))
13244                         goto partno;
13245
13246                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13247                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13248                 if (j < 0)
13249                         goto partno;
13250
13251                 len = pci_vpd_info_field_size(&vpd_data[j]);
13252
13253                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13254                 if (j + len > block_end)
13255                         goto partno;
13256
13257                 memcpy(tp->fw_ver, &vpd_data[j], len);
13258                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13259         }
13260
13261 partno:
13262         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13263                                       PCI_VPD_RO_KEYWORD_PARTNO);
13264         if (i < 0)
13265                 goto out_not_found;
13266
13267         len = pci_vpd_info_field_size(&vpd_data[i]);
13268
13269         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13270         if (len > TG3_BPN_SIZE ||
13271             (len + i) > vpdlen)
13272                 goto out_not_found;
13273
13274         memcpy(tp->board_part_number, &vpd_data[i], len);
13275
13276 out_not_found:
13277         kfree(vpd_data);
13278         if (tp->board_part_number[0])
13279                 return;
13280
13281 out_no_vpd:
13282         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13283                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13284                         strcpy(tp->board_part_number, "BCM5717");
13285                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13286                         strcpy(tp->board_part_number, "BCM5718");
13287                 else
13288                         goto nomatch;
13289         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13290                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13291                         strcpy(tp->board_part_number, "BCM57780");
13292                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13293                         strcpy(tp->board_part_number, "BCM57760");
13294                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13295                         strcpy(tp->board_part_number, "BCM57790");
13296                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13297                         strcpy(tp->board_part_number, "BCM57788");
13298                 else
13299                         goto nomatch;
13300         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13301                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13302                         strcpy(tp->board_part_number, "BCM57761");
13303                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13304                         strcpy(tp->board_part_number, "BCM57765");
13305                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13306                         strcpy(tp->board_part_number, "BCM57781");
13307                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13308                         strcpy(tp->board_part_number, "BCM57785");
13309                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13310                         strcpy(tp->board_part_number, "BCM57791");
13311                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13312                         strcpy(tp->board_part_number, "BCM57795");
13313                 else
13314                         goto nomatch;
13315         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13316                 strcpy(tp->board_part_number, "BCM95906");
13317         } else {
13318 nomatch:
13319                 strcpy(tp->board_part_number, "none");
13320         }
13321 }
13322
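/* A firmware image is considered valid when its first dword carries
 * 0x0c000000 in the top six bits and its second dword is zero.
 */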
13323 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13324 {
13325         u32 val;
13326
13327         if (tg3_nvram_read(tp, offset, &val) ||
13328             (val & 0xfc000000) != 0x0c000000 ||
13329             tg3_nvram_read(tp, offset + 4, &val) ||
13330             val != 0)
13331                 return 0;
13332
13333         return 1;
13334 }
13335
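/* Extract the bootcode version: newer images embed a 16-byte version
 * string, older ones expose a packed major/minor word that is read via
 * TG3_NVM_PTREV_BCVER.
 */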
13336 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13337 {
13338         u32 val, offset, start, ver_offset;
13339         int i, dst_off;
13340         bool newver = false;
13341
13342         if (tg3_nvram_read(tp, 0xc, &offset) ||
13343             tg3_nvram_read(tp, 0x4, &start))
13344                 return;
13345
13346         offset = tg3_nvram_logical_addr(tp, offset);
13347
13348         if (tg3_nvram_read(tp, offset, &val))
13349                 return;
13350
13351         if ((val & 0xfc000000) == 0x0c000000) {
13352                 if (tg3_nvram_read(tp, offset + 4, &val))
13353                         return;
13354
13355                 if (val == 0)
13356                         newver = true;
13357         }
13358
13359         dst_off = strlen(tp->fw_ver);
13360
13361         if (newver) {
13362                 if (TG3_VER_SIZE - dst_off < 16 ||
13363                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13364                         return;
13365
13366                 offset = offset + ver_offset - start;
13367                 for (i = 0; i < 16; i += 4) {
13368                         __be32 v;
13369                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13370                                 return;
13371
13372                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13373                 }
13374         } else {
13375                 u32 major, minor;
13376
13377                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13378                         return;
13379
13380                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13381                         TG3_NVM_BCVER_MAJSFT;
13382                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13383                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13384                          "v%d.%02d", major, minor);
13385         }
13386 }
13387
13388 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13389 {
13390         u32 val, major, minor;
13391
13392         /* Use native endian representation */
13393         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13394                 return;
13395
13396         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13397                 TG3_NVM_HWSB_CFG1_MAJSFT;
13398         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13399                 TG3_NVM_HWSB_CFG1_MINSFT;
13400
13401         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13402 }
13403
13404 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13405 {
13406         u32 offset, major, minor, build;
13407
13408         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13409
13410         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13411                 return;
13412
13413         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13414         case TG3_EEPROM_SB_REVISION_0:
13415                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13416                 break;
13417         case TG3_EEPROM_SB_REVISION_2:
13418                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13419                 break;
13420         case TG3_EEPROM_SB_REVISION_3:
13421                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13422                 break;
13423         case TG3_EEPROM_SB_REVISION_4:
13424                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13425                 break;
13426         case TG3_EEPROM_SB_REVISION_5:
13427                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13428                 break;
13429         case TG3_EEPROM_SB_REVISION_6:
13430                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13431                 break;
13432         default:
13433                 return;
13434         }
13435
13436         if (tg3_nvram_read(tp, offset, &val))
13437                 return;
13438
13439         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13440                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13441         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13442                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13443         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13444
13445         if (minor > 99 || build > 26)
13446                 return;
13447
13448         offset = strlen(tp->fw_ver);
13449         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13450                  " v%d.%02d", major, minor);
13451
13452         if (build > 0) {
13453                 offset = strlen(tp->fw_ver);
13454                 if (offset < TG3_VER_SIZE - 1)
13455                         tp->fw_ver[offset] = 'a' + build - 1;
13456         }
13457 }
13458
13459 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13460 {
13461         u32 val, offset, start;
13462         int i, vlen;
13463
13464         for (offset = TG3_NVM_DIR_START;
13465              offset < TG3_NVM_DIR_END;
13466              offset += TG3_NVM_DIRENT_SIZE) {
13467                 if (tg3_nvram_read(tp, offset, &val))
13468                         return;
13469
13470                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13471                         break;
13472         }
13473
13474         if (offset == TG3_NVM_DIR_END)
13475                 return;
13476
13477         if (!tg3_flag(tp, 5705_PLUS))
13478                 start = 0x08000000;
13479         else if (tg3_nvram_read(tp, offset - 4, &start))
13480                 return;
13481
13482         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13483             !tg3_fw_img_is_valid(tp, offset) ||
13484             tg3_nvram_read(tp, offset + 8, &val))
13485                 return;
13486
13487         offset += val - start;
13488
13489         vlen = strlen(tp->fw_ver);
13490
13491         tp->fw_ver[vlen++] = ',';
13492         tp->fw_ver[vlen++] = ' ';
13493
13494         for (i = 0; i < 4; i++) {
13495                 __be32 v;
13496                 if (tg3_nvram_read_be32(tp, offset, &v))
13497                         return;
13498
13499                 offset += sizeof(v);
13500
13501                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13502                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13503                         break;
13504                 }
13505
13506                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13507                 vlen += sizeof(v);
13508         }
13509 }
13510
13511 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13512 {
13513         int vlen;
13514         u32 apedata;
13515         char *fwtype;
13516
13517         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13518                 return;
13519
13520         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13521         if (apedata != APE_SEG_SIG_MAGIC)
13522                 return;
13523
13524         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13525         if (!(apedata & APE_FW_STATUS_READY))
13526                 return;
13527
13528         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13529
13530         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13531                 tg3_flag_set(tp, APE_HAS_NCSI);
13532                 fwtype = "NCSI";
13533         } else {
13534                 fwtype = "DASH";
13535         }
13536
13537         vlen = strlen(tp->fw_ver);
13538
13539         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13540                  fwtype,
13541                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13542                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13543                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13544                  (apedata & APE_FW_VERSION_BLDMSK));
13545 }
13546
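/* Build tp->fw_ver: append the bootcode or self-boot version found in
 * NVRAM and, when no VPD version string was already present, the
 * ASF/DASH management firmware version as well.
 */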
13547 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13548 {
13549         u32 val;
13550         bool vpd_vers = false;
13551
13552         if (tp->fw_ver[0] != 0)
13553                 vpd_vers = true;
13554
13555         if (tg3_flag(tp, NO_NVRAM)) {
13556                 strcat(tp->fw_ver, "sb");
13557                 return;
13558         }
13559
13560         if (tg3_nvram_read(tp, 0, &val))
13561                 return;
13562
13563         if (val == TG3_EEPROM_MAGIC)
13564                 tg3_read_bc_ver(tp);
13565         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13566                 tg3_read_sb_ver(tp, val);
13567         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13568                 tg3_read_hwsb_ver(tp);
13569         else
13570                 return;
13571
13572         if (vpd_vers)
13573                 goto done;
13574
13575         if (tg3_flag(tp, ENABLE_APE)) {
13576                 if (tg3_flag(tp, ENABLE_ASF))
13577                         tg3_read_dash_ver(tp);
13578         } else if (tg3_flag(tp, ENABLE_ASF)) {
13579                 tg3_read_mgmtfw_ver(tp);
13580         }
13581
13582 done:
13583         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13584 }
13585
13586 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13587
13588 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13589 {
13590         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13591                 return TG3_RX_RET_MAX_SIZE_5717;
13592         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13593                 return TG3_RX_RET_MAX_SIZE_5700;
13594         else
13595                 return TG3_RX_RET_MAX_SIZE_5705;
13596 }
13597
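/* Host bridge chipsets that are checked elsewhere in the driver to
 * decide whether a posted-write-reordering workaround is needed.
 */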
13598 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13599         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13600         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13601         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13602         { },
13603 };
13604
13605 static int __devinit tg3_get_invariants(struct tg3 *tp)
13606 {
13607         u32 misc_ctrl_reg;
13608         u32 pci_state_reg, grc_misc_cfg;
13609         u32 val;
13610         u16 pci_cmd;
13611         int err;
13612
13613         /* Force memory write invalidate off.  If we leave it on,
13614          * then on 5700_BX chips we have to enable a workaround.
13615          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13616          * to match the cacheline size.  The Broadcom driver has this
13617          * workaround but turns MWI off all the time, so it never uses
13618          * it.  This seems to suggest that the workaround is insufficient.
13619          */
13620         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13621         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13622         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13623
13624         /* Important! -- Make sure register accesses are byteswapped
13625          * correctly.  Also, for those chips that require it, make
13626          * sure that indirect register accesses are enabled before
13627          * the first operation.
13628          */
13629         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13630                               &misc_ctrl_reg);
13631         tp->misc_host_ctrl |= (misc_ctrl_reg &
13632                                MISC_HOST_CTRL_CHIPREV);
13633         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13634                                tp->misc_host_ctrl);
13635
13636         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13637                                MISC_HOST_CTRL_CHIPREV_SHIFT);
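        /* Some devices report ASIC_REV_USE_PROD_ID_REG above; for those,
         * the real chip revision lives in a product-ID register whose
         * config-space offset varies by device family.
         */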
13638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13639                 u32 prod_id_asic_rev;
13640
13641                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13642                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13643                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13644                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13645                         pci_read_config_dword(tp->pdev,
13646                                               TG3PCI_GEN2_PRODID_ASICREV,
13647                                               &prod_id_asic_rev);
13648                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13649                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13650                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13651                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13652                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13653                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13654                         pci_read_config_dword(tp->pdev,
13655                                               TG3PCI_GEN15_PRODID_ASICREV,
13656                                               &prod_id_asic_rev);
13657                 else
13658                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13659                                               &prod_id_asic_rev);
13660
13661                 tp->pci_chip_rev_id = prod_id_asic_rev;
13662         }
13663
13664         /* Wrong chip ID in 5752 A0. This code can be removed later
13665          * as A0 is not in production.
13666          */
13667         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13668                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13669
13670         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13671          * we need to disable memory and use config. cycles
13672          * only to access all registers. The 5702/03 chips
13673          * can mistakenly decode the special cycles from the
13674          * ICH chipsets as memory write cycles, causing corruption
13675          * of register and memory space. Only certain ICH bridges
13676          * will drive special cycles with non-zero data during the
13677          * address phase which can fall within the 5703's address
13678          * range. This is not an ICH bug as the PCI spec allows
13679          * non-zero address during special cycles. However, only
13680          * these ICH bridges are known to drive non-zero addresses
13681          * during special cycles.
13682          *
13683          * Since special cycles do not cross PCI bridges, we only
13684          * enable this workaround if the 5703 is on the secondary
13685          * bus of these ICH bridges.
13686          */
13687         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13688             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13689                 static struct tg3_dev_id {
13690                         u32     vendor;
13691                         u32     device;
13692                         u32     rev;
13693                 } ich_chipsets[] = {
13694                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13695                           PCI_ANY_ID },
13696                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13697                           PCI_ANY_ID },
13698                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13699                           0xa },
13700                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13701                           PCI_ANY_ID },
13702                         { },
13703                 };
13704                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13705                 struct pci_dev *bridge = NULL;
13706
13707                 while (pci_id->vendor != 0) {
13708                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13709                                                 bridge);
13710                         if (!bridge) {
13711                                 pci_id++;
13712                                 continue;
13713                         }
13714                         if (pci_id->rev != PCI_ANY_ID) {
13715                                 if (bridge->revision > pci_id->rev)
13716                                         continue;
13717                         }
13718                         if (bridge->subordinate &&
13719                             (bridge->subordinate->number ==
13720                              tp->pdev->bus->number)) {
13721                                 tg3_flag_set(tp, ICH_WORKAROUND);
13722                                 pci_dev_put(bridge);
13723                                 break;
13724                         }
13725                 }
13726         }
13727
13728         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13729                 static struct tg3_dev_id {
13730                         u32     vendor;
13731                         u32     device;
13732                 } bridge_chipsets[] = {
13733                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13734                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13735                         { },
13736                 };
13737                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13738                 struct pci_dev *bridge = NULL;
13739
13740                 while (pci_id->vendor != 0) {
13741                         bridge = pci_get_device(pci_id->vendor,
13742                                                 pci_id->device,
13743                                                 bridge);
13744                         if (!bridge) {
13745                                 pci_id++;
13746                                 continue;
13747                         }
13748                         if (bridge->subordinate &&
13749                             (bridge->subordinate->number <=
13750                              tp->pdev->bus->number) &&
13751                             (bridge->subordinate->subordinate >=
13752                              tp->pdev->bus->number)) {
13753                                 tg3_flag_set(tp, 5701_DMA_BUG);
13754                                 pci_dev_put(bridge);
13755                                 break;
13756                         }
13757                 }
13758         }
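        /* Bridge containment test used above (and again for the EPB
         * scan below): bridge->subordinate->number is the bridge's
         * secondary bus number and bridge->subordinate->subordinate is
         * the highest bus number routed behind it, so the tg3 sits
         * below the bridge exactly when its bus number falls inside
         * that [secondary, subordinate] window.
         */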
13759
13760         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13761          * DMA addresses > 40-bit. This bridge may have other additional
13762          * 57xx devices behind it in some 4-port NIC designs for example.
13763          * Any tg3 device found behind the bridge will also need the 40-bit
13764          * DMA workaround.
13765          */
13766         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13767             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13768                 tg3_flag_set(tp, 5780_CLASS);
13769                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13770                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13771         } else {
13772                 struct pci_dev *bridge = NULL;
13773
13774                 do {
13775                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13776                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13777                                                 bridge);
13778                         if (bridge && bridge->subordinate &&
13779                             (bridge->subordinate->number <=
13780                              tp->pdev->bus->number) &&
13781                             (bridge->subordinate->subordinate >=
13782                              tp->pdev->bus->number)) {
13783                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13784                                 pci_dev_put(bridge);
13785                                 break;
13786                         }
13787                 } while (bridge);
13788         }
13789
13790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13792                 tp->pdev_peer = tg3_find_peer(tp);
13793
13794         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13795             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13796             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13797                 tg3_flag_set(tp, 5717_PLUS);
13798
13799         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13800             tg3_flag(tp, 5717_PLUS))
13801                 tg3_flag_set(tp, 57765_PLUS);
13802
13803         /* Intentionally exclude ASIC_REV_5906 */
13804         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13805             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13806             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13807             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13808             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13809             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13810             tg3_flag(tp, 57765_PLUS))
13811                 tg3_flag_set(tp, 5755_PLUS);
13812
13813         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13814             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13815             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13816             tg3_flag(tp, 5755_PLUS) ||
13817             tg3_flag(tp, 5780_CLASS))
13818                 tg3_flag_set(tp, 5750_PLUS);
13819
13820         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13821             tg3_flag(tp, 5750_PLUS))
13822                 tg3_flag_set(tp, 5705_PLUS);
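        /* The generation flags established above nest strictly:
         * 5717_PLUS implies 57765_PLUS implies 5755_PLUS implies
         * 5750_PLUS implies 5705_PLUS, so later tests only need to
         * check the narrowest flag that applies.
         */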
13823
13824         /* Determine TSO capabilities */
13825         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13826                 ; /* Do nothing. HW bug. */
13827         else if (tg3_flag(tp, 57765_PLUS))
13828                 tg3_flag_set(tp, HW_TSO_3);
13829         else if (tg3_flag(tp, 5755_PLUS) ||
13830                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13831                 tg3_flag_set(tp, HW_TSO_2);
13832         else if (tg3_flag(tp, 5750_PLUS)) {
13833                 tg3_flag_set(tp, HW_TSO_1);
13834                 tg3_flag_set(tp, TSO_BUG);
13835                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13836                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13837                         tg3_flag_clear(tp, TSO_BUG);
13838         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13839                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13840                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13841                 tg3_flag_set(tp, TSO_BUG);
13842                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13843                         tp->fw_needed = FIRMWARE_TG3TSO5;
13844                 else
13845                         tp->fw_needed = FIRMWARE_TG3TSO;
13846         }
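        /* The ladder above picks one of four TSO tiers: 57765_PLUS
         * parts use HW_TSO_3, 5755_PLUS and 5906 use HW_TSO_2, other
         * 5750_PLUS parts use HW_TSO_1 (with TSO_BUG on steppings
         * before 5750 C2), and older chips except 5700/5701/5705 A0
         * fall back to firmware TSO.  The 5719 is skipped entirely
         * because of a hardware bug.
         */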
13847
13848         /* Selectively allow TSO based on operating conditions */
13849         if (tg3_flag(tp, HW_TSO_1) ||
13850             tg3_flag(tp, HW_TSO_2) ||
13851             tg3_flag(tp, HW_TSO_3) ||
13852             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13853                 tg3_flag_set(tp, TSO_CAPABLE);
13854         else {
13855                 tg3_flag_clear(tp, TSO_CAPABLE);
13856                 tg3_flag_clear(tp, TSO_BUG);
13857                 tp->fw_needed = NULL;
13858         }
13859
13860         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13861                 tp->fw_needed = FIRMWARE_TG3;
13862
13863         tp->irq_max = 1;
13864
13865         if (tg3_flag(tp, 5750_PLUS)) {
13866                 tg3_flag_set(tp, SUPPORT_MSI);
13867                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13868                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13869                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13870                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13871                      tp->pdev_peer == tp->pdev))
13872                         tg3_flag_clear(tp, SUPPORT_MSI);
13873
13874                 if (tg3_flag(tp, 5755_PLUS) ||
13875                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13876                         tg3_flag_set(tp, 1SHOT_MSI);
13877                 }
13878
13879                 if (tg3_flag(tp, 57765_PLUS)) {
13880                         tg3_flag_set(tp, SUPPORT_MSIX);
13881                         tp->irq_max = TG3_IRQ_MAX_VECS;
13882                 }
13883         }
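        /* Interrupt capability summary: every 5750_PLUS chip claims
         * MSI except the 5750 AX/BX steppings and 5714 revisions up to
         * A2 with no peer function; 5755_PLUS and 5906 additionally
         * use one-shot MSI, and only 57765_PLUS parts get MSI-X with
         * up to TG3_IRQ_MAX_VECS vectors.
         */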
13884
13885         if (tg3_flag(tp, 5755_PLUS))
13886                 tg3_flag_set(tp, SHORT_DMA_BUG);
13887
13888         if (tg3_flag(tp, 5717_PLUS))
13889                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13890
13891         if (tg3_flag(tp, 57765_PLUS) &&
13892             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13893                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13894
13895         if (!tg3_flag(tp, 5705_PLUS) ||
13896             tg3_flag(tp, 5780_CLASS) ||
13897             tg3_flag(tp, USE_JUMBO_BDFLAG))
13898                 tg3_flag_set(tp, JUMBO_CAPABLE);
13899
13900         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13901                               &pci_state_reg);
13902
13903         if (pci_is_pcie(tp->pdev)) {
13904                 u16 lnkctl;
13905
13906                 tg3_flag_set(tp, PCI_EXPRESS);
13907
13908                 tp->pcie_readrq = 4096;
13909                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13910                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13911                         tp->pcie_readrq = 2048;
13912
13913                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13914
13915                 pci_read_config_word(tp->pdev,
13916                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13917                                      &lnkctl);
13918                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13919                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13920                             ASIC_REV_5906) {
13921                                 tg3_flag_clear(tp, HW_TSO_2);
13922                                 tg3_flag_clear(tp, TSO_CAPABLE);
13923                         }
13924                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13925                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13926                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13927                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13928                                 tg3_flag_set(tp, CLKREQ_BUG);
13929                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13930                         tg3_flag_set(tp, L1PLLPD_EN);
13931                 }
13932         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13933                 /* BCM5785 devices are effectively PCIe devices, and should
13934                  * follow PCIe codepaths, but do not have a PCIe capabilities
13935                  * section.
13936                  */
13937                 tg3_flag_set(tp, PCI_EXPRESS);
13938         } else if (!tg3_flag(tp, 5705_PLUS) ||
13939                    tg3_flag(tp, 5780_CLASS)) {
13940                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13941                 if (!tp->pcix_cap) {
13942                         dev_err(&tp->pdev->dev,
13943                                 "Cannot find PCI-X capability, aborting\n");
13944                         return -EIO;
13945                 }
13946
13947                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13948                         tg3_flag_set(tp, PCIX_MODE);
13949         }
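        /* Bus mode is now settled: PCI_EXPRESS when a PCIe capability
         * is present (or for the capability-less 5785), PCIX_MODE when
         * a PCI-X capability exists and PCISTATE reports the bus is
         * not in conventional PCI mode, and plain PCI otherwise.
         */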
13950
13951         /* If we have an AMD 762 or VIA K8T800 chipset, write
13952          * reordering of mailbox register accesses by the host
13953          * controller can cause serious problems.  We read back after
13954          * every mailbox register write to force the writes to be
13955          * posted to the chip in order.
13956          */
13957         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13958             !tg3_flag(tp, PCI_EXPRESS))
13959                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13960
13961         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13962                              &tp->pci_cacheline_sz);
13963         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13964                              &tp->pci_lat_timer);
13965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13966             tp->pci_lat_timer < 64) {
13967                 tp->pci_lat_timer = 64;
13968                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13969                                       tp->pci_lat_timer);
13970         }
13971
13972         /* Important! -- It is critical that the PCI-X hw workaround
13973          * situation is decided before the first MMIO register access.
13974          */
13975         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13976                 /* 5700 BX chips need to have their TX producer index
13977                  * mailboxes written twice to workaround a bug.
13978                  */
13979                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13980
13981                 /* If we are in PCI-X mode, enable register write workaround.
13982                  *
13983                  * The workaround is to use indirect register accesses
13984                  * for all chip writes not to mailbox registers.
13985                  */
13986                 if (tg3_flag(tp, PCIX_MODE)) {
13987                         u32 pm_reg;
13988
13989                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13990
13991                         /* The chip can have its power management PCI config
13992                          * space registers clobbered due to this bug.
13993                          * So explicitly force the chip into D0 here.
13994                          */
13995                         pci_read_config_dword(tp->pdev,
13996                                               tp->pm_cap + PCI_PM_CTRL,
13997                                               &pm_reg);
13998                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13999                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14000                         pci_write_config_dword(tp->pdev,
14001                                                tp->pm_cap + PCI_PM_CTRL,
14002                                                pm_reg);
14003
14004                         /* Also, force SERR#/PERR# in PCI command. */
14005                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14006                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14007                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14008                 }
14009         }
14010
14011         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14012                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14013         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14014                 tg3_flag_set(tp, PCI_32BIT);
14015
14016         /* Chip-specific fixup from Broadcom driver */
14017         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14018             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14019                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14020                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14021         }
14022
14023         /* Default fast path register access methods */
14024         tp->read32 = tg3_read32;
14025         tp->write32 = tg3_write32;
14026         tp->read32_mbox = tg3_read32;
14027         tp->write32_mbox = tg3_write32;
14028         tp->write32_tx_mbox = tg3_write32;
14029         tp->write32_rx_mbox = tg3_write32;
14030
14031         /* Various workaround register access methods */
14032         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14033                 tp->write32 = tg3_write_indirect_reg32;
14034         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14035                  (tg3_flag(tp, PCI_EXPRESS) &&
14036                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14037                 /*
14038                  * Back-to-back register writes can cause problems on these
14039                  * chips; the workaround is to read back all register writes
14040                  * except those to mailbox registers.
14041                  *
14042                  * See tg3_write_indirect_reg32().
14043                  */
14044                 tp->write32 = tg3_write_flush_reg32;
14045         }
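        /* Every register access in the driver dispatches through
         * these function pointers (the tr32()/tw32() helpers wrap
         * tp->read32()/tp->write32()), so substituting the indirect or
         * flushing variants here retrofits each workaround onto all
         * callers at once.
         */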
14046
14047         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14048                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14049                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14050                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14051         }
14052
14053         if (tg3_flag(tp, ICH_WORKAROUND)) {
14054                 tp->read32 = tg3_read_indirect_reg32;
14055                 tp->write32 = tg3_write_indirect_reg32;
14056                 tp->read32_mbox = tg3_read_indirect_mbox;
14057                 tp->write32_mbox = tg3_write_indirect_mbox;
14058                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14059                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14060
14061                 iounmap(tp->regs);
14062                 tp->regs = NULL;
14063
14064                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14065                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14066                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14067         }
14068         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14069                 tp->read32_mbox = tg3_read32_mbox_5906;
14070                 tp->write32_mbox = tg3_write32_mbox_5906;
14071                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14072                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14073         }
14074
14075         if (tp->write32 == tg3_write_indirect_reg32 ||
14076             (tg3_flag(tp, PCIX_MODE) &&
14077              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14078               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14079                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14080
14081         /* The memory arbiter has to be enabled in order for SRAM accesses
14082          * to succeed.  Normally on powerup the tg3 chip firmware will make
14083          * sure it is enabled, but other entities such as system netboot
14084          * code might disable it.
14085          */
14086         val = tr32(MEMARB_MODE);
14087         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14088
14089         if (tg3_flag(tp, PCIX_MODE)) {
14090                 pci_read_config_dword(tp->pdev,
14091                                       tp->pcix_cap + PCI_X_STATUS, &val);
14092                 tp->pci_fn = val & 0x7;
14093         } else {
14094                 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14095         }
14096
14097         /* Get eeprom hw config before calling tg3_set_power_state().
14098          * In particular, the TG3_FLAG_IS_NIC flag must be
14099          * determined before calling tg3_set_power_state() so that
14100          * we know whether or not to switch out of Vaux power.
14101          * When the EEPROM_WRITE_PROT flag is set, it means that GPIO1 is
14102          * used for eeprom write protect, which also implies a LOM where
14103          * GPIOs are not used to switch power.
14104          */
14105         tg3_get_eeprom_hw_cfg(tp);
14106
14107         if (tg3_flag(tp, ENABLE_APE)) {
14108                 /* Allow reads and writes to the
14109                  * APE register and memory space.
14110                  */
14111                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14112                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14113                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14114                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14115                                        pci_state_reg);
14116
14117                 tg3_ape_lock_init(tp);
14118         }
14119
14120         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14121             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14122             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14123             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14124             tg3_flag(tp, 57765_PLUS))
14125                 tg3_flag_set(tp, CPMU_PRESENT);
14126
14127         /* Set up tp->grc_local_ctrl before calling
14128          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14129          * will bring 5700's external PHY out of reset.
14130          * It is also used as eeprom write protect on LOMs.
14131          */
14132         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14133         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14134             tg3_flag(tp, EEPROM_WRITE_PROT))
14135                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14136                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14137         /* Unused GPIO3 must be driven as output on 5752 because there
14138          * are no pull-up resistors on unused GPIO pins.
14139          */
14140         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14141                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14142
14143         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14144             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14145             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14146                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14147
14148         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14149             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14150                 /* Turn off the debug UART. */
14151                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14152                 if (tg3_flag(tp, IS_NIC))
14153                         /* Keep VMain power. */
14154                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14155                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14156         }
14157
14158         /* Switch out of Vaux if it is a NIC */
14159         tg3_pwrsrc_switch_to_vmain(tp);
14160
14161         /* Derive initial jumbo mode from MTU assigned in
14162          * ether_setup() via the alloc_etherdev() call
14163          */
14164         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14165                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14166
14167         /* Determine WakeOnLan speed to use. */
14168         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14169             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14170             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14171             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14172                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14173         } else {
14174                 tg3_flag_set(tp, WOL_SPEED_100MB);
14175         }
14176
14177         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14178                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14179
14180         /* A few boards don't want Ethernet@WireSpeed phy feature */
14181         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14182             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14183              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14184              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14185             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14186             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14187                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14188
14189         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14190             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14191                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14192         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14193                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14194
14195         if (tg3_flag(tp, 5705_PLUS) &&
14196             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14197             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14198             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14199             !tg3_flag(tp, 57765_PLUS)) {
14200                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14201                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14202                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14203                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14204                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14205                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14206                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14207                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14208                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14209                 } else
14210                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14211         }
14212
14213         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14214             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14215                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14216                 if (tp->phy_otp == 0)
14217                         tp->phy_otp = TG3_OTP_DEFAULT;
14218         }
14219
14220         if (tg3_flag(tp, CPMU_PRESENT))
14221                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14222         else
14223                 tp->mi_mode = MAC_MI_MODE_BASE;
14224
14225         tp->coalesce_mode = 0;
14226         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14227             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14228                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14229
14230         /* Set these bits to enable statistics workaround. */
14231         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14232             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14233             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14234                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14235                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14236         }
14237
14238         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14239             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14240                 tg3_flag_set(tp, USE_PHYLIB);
14241
14242         err = tg3_mdio_init(tp);
14243         if (err)
14244                 return err;
14245
14246         /* Initialize data/descriptor byte/word swapping. */
14247         val = tr32(GRC_MODE);
14248         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14249                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14250                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14251                         GRC_MODE_B2HRX_ENABLE |
14252                         GRC_MODE_HTX2B_ENABLE |
14253                         GRC_MODE_HOST_STACKUP);
14254         else
14255                 val &= GRC_MODE_HOST_STACKUP;
14256
14257         tw32(GRC_MODE, val | tp->grc_mode);
14258
14259         tg3_switch_clocks(tp);
14260
14261         /* Clear this out for sanity. */
14262         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14263
14264         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14265                               &pci_state_reg);
14266         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14267             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14268                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14269
14270                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14271                     chiprevid == CHIPREV_ID_5701_B0 ||
14272                     chiprevid == CHIPREV_ID_5701_B2 ||
14273                     chiprevid == CHIPREV_ID_5701_B5) {
14274                         void __iomem *sram_base;
14275
14276                         /* Write some dummy words into the SRAM status block
14277                          * area and see if it reads back correctly.  If the return
14278                          * value is bad, force enable the PCIX workaround.
14279                          */
14280                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14281
14282                         writel(0x00000000, sram_base);
14283                         writel(0x00000000, sram_base + 4);
14284                         writel(0xffffffff, sram_base + 4);
14285                         if (readl(sram_base) != 0x00000000)
14286                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14287                 }
14288         }
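        /* The probe above writes zero to the first status-block word,
         * then zero followed by 0xffffffff to the word after it.  On a
         * healthy bus the first word still reads back as zero; if the
         * 0xffffffff write bled into it, target-mode writes are being
         * corrupted and PCIX_TARGET_HWBUG is forced on.
         */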
14289
14290         udelay(50);
14291         tg3_nvram_init(tp);
14292
14293         grc_misc_cfg = tr32(GRC_MISC_CFG);
14294         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14295
14296         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14297             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14298              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14299                 tg3_flag_set(tp, IS_5788);
14300
14301         if (!tg3_flag(tp, IS_5788) &&
14302             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14303                 tg3_flag_set(tp, TAGGED_STATUS);
14304         if (tg3_flag(tp, TAGGED_STATUS)) {
14305                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14306                                       HOSTCC_MODE_CLRTICK_TXBD);
14307
14308                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14309                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14310                                        tp->misc_host_ctrl);
14311         }
14312
14313         /* Preserve the APE MAC_MODE bits */
14314         if (tg3_flag(tp, ENABLE_APE))
14315                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14316         else
14317                 tp->mac_mode = TG3_DEF_MAC_MODE;
14318
14319         /* these are limited to 10/100 only */
14320         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14321              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14322             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14323              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14324              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14325               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14326               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14327             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14328              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14329               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14330               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14331             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14332             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14333             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14334             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14335                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14336
14337         err = tg3_phy_probe(tp);
14338         if (err) {
14339                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14340                 /* ... but do not return immediately ... */
14341                 tg3_mdio_fini(tp);
14342         }
14343
14344         tg3_read_vpd(tp);
14345         tg3_read_fw_ver(tp);
14346
14347         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14348                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14349         } else {
14350                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14351                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14352                 else
14353                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14354         }
14355
14356         /* 5700 {AX,BX} chips have a broken status block link
14357          * change bit implementation, so we must use the
14358          * status register in those cases.
14359          */
14360         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14361                 tg3_flag_set(tp, USE_LINKCHG_REG);
14362         else
14363                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14364
14365         /* The led_ctrl is set during tg3_phy_probe; here we might
14366          * have to force the link status polling mechanism based
14367          * upon subsystem IDs.
14368          */
14369         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14370             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14371             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14372                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14373                 tg3_flag_set(tp, USE_LINKCHG_REG);
14374         }
14375
14376         /* For all SERDES we poll the MAC status register. */
14377         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14378                 tg3_flag_set(tp, POLL_SERDES);
14379         else
14380                 tg3_flag_clear(tp, POLL_SERDES);
14381
14382         tp->rx_offset = NET_IP_ALIGN;
14383         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14384         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14385             tg3_flag(tp, PCIX_MODE)) {
14386                 tp->rx_offset = 0;
14387 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14388                 tp->rx_copy_thresh = ~(u16)0;
14389 #endif
14390         }
14391
14392         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14393         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14394         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14395
14396         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14397
14398         /* Increment the rx prod index on the rx std ring by at most
14399          * 8 for these chips to work around hw errata.
14400          */
14401         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14402             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14403             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14404                 tp->rx_std_max_post = 8;
14405
14406         if (tg3_flag(tp, ASPM_WORKAROUND))
14407                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14408                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14409
14410         return err;
14411 }
14412
14413 #ifdef CONFIG_SPARC
14414 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14415 {
14416         struct net_device *dev = tp->dev;
14417         struct pci_dev *pdev = tp->pdev;
14418         struct device_node *dp = pci_device_to_OF_node(pdev);
14419         const unsigned char *addr;
14420         int len;
14421
14422         addr = of_get_property(dp, "local-mac-address", &len);
14423         if (addr && len == 6) {
14424                 memcpy(dev->dev_addr, addr, 6);
14425                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14426                 return 0;
14427         }
14428         return -ENODEV;
14429 }
14430
14431 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14432 {
14433         struct net_device *dev = tp->dev;
14434
14435         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14436         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14437         return 0;
14438 }
14439 #endif
14440
14441 static int __devinit tg3_get_device_address(struct tg3 *tp)
14442 {
14443         struct net_device *dev = tp->dev;
14444         u32 hi, lo, mac_offset;
14445         int addr_ok = 0;
14446
14447 #ifdef CONFIG_SPARC
14448         if (!tg3_get_macaddr_sparc(tp))
14449                 return 0;
14450 #endif
14451
14452         mac_offset = 0x7c;
14453         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14454             tg3_flag(tp, 5780_CLASS)) {
14455                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14456                         mac_offset = 0xcc;
14457                 if (tg3_nvram_lock(tp))
14458                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14459                 else
14460                         tg3_nvram_unlock(tp);
14461         } else if (tg3_flag(tp, 5717_PLUS)) {
14462                 if (tp->pci_fn & 1)
14463                         mac_offset = 0xcc;
14464                 if (tp->pci_fn > 1)
14465                         mac_offset += 0x18c;
14466         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14467                 mac_offset = 0x10;
14468
14469         /* First try to get it from MAC address mailbox. */
14470         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14471         if ((hi >> 16) == 0x484b) {
14472                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14473                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14474
14475                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14476                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14477                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14478                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14479                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14480
14481                 /* Some old bootcode may report a 0 MAC address in SRAM */
14482                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14483         }
14484         if (!addr_ok) {
14485                 /* Next, try NVRAM. */
14486                 if (!tg3_flag(tp, NO_NVRAM) &&
14487                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14488                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14489                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14490                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14491                 }
14492                 /* Finally just fetch it out of the MAC control regs. */
14493                 else {
14494                         hi = tr32(MAC_ADDR_0_HIGH);
14495                         lo = tr32(MAC_ADDR_0_LOW);
14496
14497                         dev->dev_addr[5] = lo & 0xff;
14498                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14499                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14500                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14501                         dev->dev_addr[1] = hi & 0xff;
14502                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14503                 }
14504         }
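        /* MAC address sources are tried in decreasing order of trust:
         * the SRAM mailbox (bootcode tags a valid entry with 0x484b,
         * ASCII "HK", in the high word), then NVRAM at the
         * chip-specific mac_offset, then the live MAC_ADDR_0 registers,
         * with the SPARC system idprom as the last resort below.
         */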
14505
14506         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14507 #ifdef CONFIG_SPARC
14508                 if (!tg3_get_default_macaddr_sparc(tp))
14509                         return 0;
14510 #endif
14511                 return -EINVAL;
14512         }
14513         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14514         return 0;
14515 }
14516
14517 #define BOUNDARY_SINGLE_CACHELINE       1
14518 #define BOUNDARY_MULTI_CACHELINE        2
14519
14520 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14521 {
14522         int cacheline_size;
14523         u8 byte;
14524         int goal;
14525
14526         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14527         if (byte == 0)
14528                 cacheline_size = 1024;
14529         else
14530                 cacheline_size = (int) byte * 4;
14531
14532         /* On 5703 and later chips, the boundary bits have no
14533          * effect.
14534          */
14535         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14536             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14537             !tg3_flag(tp, PCI_EXPRESS))
14538                 goto out;
14539
14540 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14541         goal = BOUNDARY_MULTI_CACHELINE;
14542 #else
14543 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14544         goal = BOUNDARY_SINGLE_CACHELINE;
14545 #else
14546         goal = 0;
14547 #endif
14548 #endif
14549
14550         if (tg3_flag(tp, 57765_PLUS)) {
14551                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14552                 goto out;
14553         }
14554
14555         if (!goal)
14556                 goto out;
14557
14558         /* PCI controllers on most RISC systems tend to disconnect
14559          * when a device tries to burst across a cache-line boundary.
14560          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14561          *
14562          * Unfortunately, for PCI-E there are only limited
14563          * write-side controls for this, and thus for reads
14564          * we will still get the disconnects.  We'll also waste
14565          * these PCI cycles for both read and write for chips
14566          * other than 5700 and 5701 which do not implement the
14567          * boundary bits.
14568          */
14569         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14570                 switch (cacheline_size) {
14571                 case 16:
14572                 case 32:
14573                 case 64:
14574                 case 128:
14575                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14576                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14577                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14578                         } else {
14579                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14580                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14581                         }
14582                         break;
14583
14584                 case 256:
14585                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14586                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14587                         break;
14588
14589                 default:
14590                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14591                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14592                         break;
14593                 }
14594         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14595                 switch (cacheline_size) {
14596                 case 16:
14597                 case 32:
14598                 case 64:
14599                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14600                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14601                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14602                                 break;
14603                         }
14604                         /* fallthrough */
14605                 case 128:
14606                 default:
14607                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14608                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14609                         break;
14610                 }
14611         } else {
14612                 switch (cacheline_size) {
14613                 case 16:
14614                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14615                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14616                                         DMA_RWCTRL_WRITE_BNDRY_16);
14617                                 break;
14618                         }
14619                         /* fallthrough */
14620                 case 32:
14621                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14622                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14623                                         DMA_RWCTRL_WRITE_BNDRY_32);
14624                                 break;
14625                         }
14626                         /* fallthrough */
14627                 case 64:
14628                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14629                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14630                                         DMA_RWCTRL_WRITE_BNDRY_64);
14631                                 break;
14632                         }
14633                         /* fallthrough */
14634                 case 128:
14635                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14636                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14637                                         DMA_RWCTRL_WRITE_BNDRY_128);
14638                                 break;
14639                         }
14640                         /* fallthrough */
14641                 case 256:
14642                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14643                                 DMA_RWCTRL_WRITE_BNDRY_256);
14644                         break;
14645                 case 512:
14646                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14647                                 DMA_RWCTRL_WRITE_BNDRY_512);
14648                         break;
14649                 case 1024:
14650                 default:
14651                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14652                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14653                         break;
14654                 }
14655         }
14656
14657 out:
14658         return val;
14659 }
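/* Worked example for tg3_calc_dma_bndry() above, assuming a 64-byte
 * cache line (PCI_CACHE_LINE_SIZE reads back 0x10) and
 * goal == BOUNDARY_SINGLE_CACHELINE: a PCI-X bus gets the 128-byte
 * read/write boundary bits, PCIe gets DMA_RWCTRL_WRITE_BNDRY_64_PCIE,
 * and conventional PCI gets the 64-byte read/write boundary bits.
 */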
14660
14661 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14662 {
14663         struct tg3_internal_buffer_desc test_desc;
14664         u32 sram_dma_descs;
14665         int i, ret;
14666
14667         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14668
14669         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14670         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14671         tw32(RDMAC_STATUS, 0);
14672         tw32(WDMAC_STATUS, 0);
14673
14674         tw32(BUFMGR_MODE, 0);
14675         tw32(FTQ_RESET, 0);
14676
14677         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14678         test_desc.addr_lo = buf_dma & 0xffffffff;
14679         test_desc.nic_mbuf = 0x00002100;
14680         test_desc.len = size;
14681
14682         /*
14683          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14684          * the *second* time the tg3 driver was getting loaded after an
14685          * initial scan.
14686          *
14687          * Broadcom tells me:
14688          *   ...the DMA engine is connected to the GRC block and a DMA
14689          *   reset may affect the GRC block in some unpredictable way...
14690          *   The behavior of resets to individual blocks has not been tested.
14691          *
14692          * Broadcom noted the GRC reset will also reset all sub-components.
14693          */
14694         if (to_device) {
14695                 test_desc.cqid_sqid = (13 << 8) | 2;
14696
14697                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14698                 udelay(40);
14699         } else {
14700                 test_desc.cqid_sqid = (16 << 8) | 7;
14701
14702                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14703                 udelay(40);
14704         }
14705         test_desc.flags = 0x00000005;
14706
14707         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14708                 u32 val;
14709
14710                 val = *(((u32 *)&test_desc) + i);
14711                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14712                                        sram_dma_descs + (i * sizeof(u32)));
14713                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14714         }
14715         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14716
14717         if (to_device)
14718                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14719         else
14720                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14721
14722         ret = -ENODEV;
14723         for (i = 0; i < 40; i++) {
14724                 u32 val;
14725
14726                 if (to_device)
14727                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14728                 else
14729                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14730                 if ((val & 0xffff) == sram_dma_descs) {
14731                         ret = 0;
14732                         break;
14733                 }
14734
14735                 udelay(100);
14736         }
14737
14738         return ret;
14739 }
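/* tg3_do_test_dma() pushes one buffer through the chip's internal DMA
 * engine: the test descriptor is copied into NIC SRAM a word at a time
 * through the TG3PCI_MEM_WIN_* config-space window, the descriptor
 * address is queued on the read- or write-DMA FTQ, and the matching
 * completion FIFO is polled for up to 4 ms (40 x 100 us) before the
 * test gives up with -ENODEV.
 */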
14740
14741 #define TEST_BUFFER_SIZE        0x2000
14742
14743 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14744         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14745         { },
14746 };
14747
14748 static int __devinit tg3_test_dma(struct tg3 *tp)
14749 {
14750         dma_addr_t buf_dma;
14751         u32 *buf, saved_dma_rwctrl;
14752         int ret = 0;
14753
14754         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14755                                  &buf_dma, GFP_KERNEL);
14756         if (!buf) {
14757                 ret = -ENOMEM;
14758                 goto out_nofree;
14759         }
14760
14761         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14762                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14763
14764         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14765
14766         if (tg3_flag(tp, 57765_PLUS))
14767                 goto out;
14768
14769         if (tg3_flag(tp, PCI_EXPRESS)) {
14770                 /* DMA read watermark not used on PCIE */
14771                 tp->dma_rwctrl |= 0x00180000;
14772         } else if (!tg3_flag(tp, PCIX_MODE)) {
14773                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14774                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14775                         tp->dma_rwctrl |= 0x003f0000;
14776                 else
14777                         tp->dma_rwctrl |= 0x003f000f;
14778         } else {
14779                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14780                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14781                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14782                         u32 read_water = 0x7;
14783
14784                         /* If the 5704 is behind the EPB bridge, we can
14785                          * do the less restrictive ONE_DMA workaround for
14786                          * better performance.
14787                          */
14788                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14789                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14790                                 tp->dma_rwctrl |= 0x8000;
14791                         else if (ccval == 0x6 || ccval == 0x7)
14792                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14793
14794                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14795                                 read_water = 4;
14796                         /* Set bit 23 to enable PCIX hw bug fix */
14797                         tp->dma_rwctrl |=
14798                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14799                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14800                                 (1 << 23);
14801                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14802                         /* 5780 always in PCIX mode */
14803                         tp->dma_rwctrl |= 0x00144000;
14804                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14805                         /* 5714 always in PCIX mode */
14806                         tp->dma_rwctrl |= 0x00148000;
14807                 } else {
14808                         tp->dma_rwctrl |= 0x001b000f;
14809                 }
14810         }
14811
14812         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14813             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14814                 tp->dma_rwctrl &= 0xfffffff0;
14815
14816         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14817             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14818                 /* Remove this if it causes problems for some boards. */
14819                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14820
14821                 /* On 5700/5701 chips, we need to set this bit.
14822                  * Otherwise the chip will issue cacheline transactions
14823                  * to streamable DMA memory without all of the byte
14824                  * enables asserted.  This is an error on several
14825                  * RISC PCI controllers, in particular sparc64.
14826                  *
14827                  * On 5703/5704 chips, this bit has been reassigned
14828                  * a different meaning.  In particular, it is used
14829                  * on those chips to enable a PCI-X workaround.
14830                  */
14831                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14832         }
14833
14834         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14835
14836 #if 0
14837         /* Unneeded, already done by tg3_get_invariants.  */
14838         tg3_switch_clocks(tp);
14839 #endif
14840
14841         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14842             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14843                 goto out;
14844
14845         /* It is best to perform the DMA test with the maximum write
14846          * burst size to expose the 5700/5701 write DMA bug.
14847          */
14848         saved_dma_rwctrl = tp->dma_rwctrl;
14849         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14850         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14851
14852         while (1) {
14853                 u32 *p = buf, i;
14854
14855                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14856                         p[i] = i;
14857
14858                 /* Send the buffer to the chip. */
14859                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14860                 if (ret) {
14861                         dev_err(&tp->pdev->dev,
14862                                 "%s: Buffer write failed. err = %d\n",
14863                                 __func__, ret);
14864                         break;
14865                 }
14866
14867 #if 0
14868                 /* validate data reached card RAM correctly. */
14869                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14870                         u32 val;
14871                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
14872                         if (le32_to_cpu(val) != p[i]) {
14873                                 dev_err(&tp->pdev->dev,
14874                                         "%s: Buffer corrupted on device! "
14875                                         "(%d != %d)\n", __func__, val, i);
14876                                 /* ret = -ENODEV here? */
14877                         }
14878                         p[i] = 0;
14879                 }
14880 #endif
14881                 /* Now read it back. */
14882                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14883                 if (ret) {
14884                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14885                                 "err = %d\n", __func__, ret);
14886                         break;
14887                 }
14888
14889                 /* Verify it. */
14890                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14891                         if (p[i] == i)
14892                                 continue;
14893
14894                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14895                             DMA_RWCTRL_WRITE_BNDRY_16) {
14896                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14897                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14898                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14899                                 break;
14900                         } else {
14901                                 dev_err(&tp->pdev->dev,
14902                                         "%s: Buffer corrupted on read back! "
14903                                         "(%d != %d)\n", __func__, p[i], i);
14904                                 ret = -ENODEV;
14905                                 goto out;
14906                         }
14907                 }
14908
14909                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14910                         /* Success. */
14911                         ret = 0;
14912                         break;
14913                 }
14914         }
14915         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14916             DMA_RWCTRL_WRITE_BNDRY_16) {
14917                 /* DMA test passed without adjusting the DMA boundary;
14918                  * now look for chipsets that are known to expose the
14919                  * DMA bug without failing the test.
14920                  */
14921                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14922                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14923                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14924                 } else {
14925                         /* Safe to use the calculated DMA boundary. */
14926                         tp->dma_rwctrl = saved_dma_rwctrl;
14927                 }
14928
14929                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14930         }
14931
14932 out:
14933         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14934 out_nofree:
14935         return ret;
14936 }
14937
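/* Choose buffer manager watermark defaults appropriate to the chip
 * generation; jumbo frame watermarks are tracked separately.
 */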
14938 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14939 {
14940         if (tg3_flag(tp, 57765_PLUS)) {
14941                 tp->bufmgr_config.mbuf_read_dma_low_water =
14942                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14943                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14944                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14945                 tp->bufmgr_config.mbuf_high_water =
14946                         DEFAULT_MB_HIGH_WATER_57765;
14947
14948                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14949                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14950                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14951                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14952                 tp->bufmgr_config.mbuf_high_water_jumbo =
14953                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14954         } else if (tg3_flag(tp, 5705_PLUS)) {
14955                 tp->bufmgr_config.mbuf_read_dma_low_water =
14956                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14957                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14958                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14959                 tp->bufmgr_config.mbuf_high_water =
14960                         DEFAULT_MB_HIGH_WATER_5705;
14961                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14962                         tp->bufmgr_config.mbuf_mac_rx_low_water =
14963                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
14964                         tp->bufmgr_config.mbuf_high_water =
14965                                 DEFAULT_MB_HIGH_WATER_5906;
14966                 }
14967
14968                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14969                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14970                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14971                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14972                 tp->bufmgr_config.mbuf_high_water_jumbo =
14973                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14974         } else {
14975                 tp->bufmgr_config.mbuf_read_dma_low_water =
14976                         DEFAULT_MB_RDMA_LOW_WATER;
14977                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14978                         DEFAULT_MB_MACRX_LOW_WATER;
14979                 tp->bufmgr_config.mbuf_high_water =
14980                         DEFAULT_MB_HIGH_WATER;
14981
14982                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14983                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14984                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14985                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14986                 tp->bufmgr_config.mbuf_high_water_jumbo =
14987                         DEFAULT_MB_HIGH_WATER_JUMBO;
14988         }
14989
14990         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14991         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14992 }
14993
14994 static char * __devinit tg3_phy_string(struct tg3 *tp)
14995 {
14996         switch (tp->phy_id & TG3_PHY_ID_MASK) {
14997         case TG3_PHY_ID_BCM5400:        return "5400";
14998         case TG3_PHY_ID_BCM5401:        return "5401";
14999         case TG3_PHY_ID_BCM5411:        return "5411";
15000         case TG3_PHY_ID_BCM5701:        return "5701";
15001         case TG3_PHY_ID_BCM5703:        return "5703";
15002         case TG3_PHY_ID_BCM5704:        return "5704";
15003         case TG3_PHY_ID_BCM5705:        return "5705";
15004         case TG3_PHY_ID_BCM5750:        return "5750";
15005         case TG3_PHY_ID_BCM5752:        return "5752";
15006         case TG3_PHY_ID_BCM5714:        return "5714";
15007         case TG3_PHY_ID_BCM5780:        return "5780";
15008         case TG3_PHY_ID_BCM5755:        return "5755";
15009         case TG3_PHY_ID_BCM5787:        return "5787";
15010         case TG3_PHY_ID_BCM5784:        return "5784";
15011         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15012         case TG3_PHY_ID_BCM5906:        return "5906";
15013         case TG3_PHY_ID_BCM5761:        return "5761";
15014         case TG3_PHY_ID_BCM5718C:       return "5718C";
15015         case TG3_PHY_ID_BCM5718S:       return "5718S";
15016         case TG3_PHY_ID_BCM57765:       return "57765";
15017         case TG3_PHY_ID_BCM5719C:       return "5719C";
15018         case TG3_PHY_ID_BCM5720C:       return "5720C";
15019         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15020         case 0:                 return "serdes";
15021         default:                return "unknown";
15022         }
15023 }
15024
15025 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15026 {
15027         if (tg3_flag(tp, PCI_EXPRESS)) {
15028                 strcpy(str, "PCI Express");
15029                 return str;
15030         } else if (tg3_flag(tp, PCIX_MODE)) {
15031                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15032
15033                 strcpy(str, "PCIX:");
15034
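                /* The low five bits of CLOCK_CTRL encode the PCI-X bus
                 * speed; decode the common values below.
                 */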
15035                 if ((clock_ctrl == 7) ||
15036                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15037                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15038                         strcat(str, "133MHz");
15039                 else if (clock_ctrl == 0)
15040                         strcat(str, "33MHz");
15041                 else if (clock_ctrl == 2)
15042                         strcat(str, "50MHz");
15043                 else if (clock_ctrl == 4)
15044                         strcat(str, "66MHz");
15045                 else if (clock_ctrl == 6)
15046                         strcat(str, "100MHz");
15047         } else {
15048                 strcpy(str, "PCI:");
15049                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15050                         strcat(str, "66MHz");
15051                 else
15052                         strcat(str, "33MHz");
15053         }
15054         if (tg3_flag(tp, PCI_32BIT))
15055                 strcat(str, ":32-bit");
15056         else
15057                 strcat(str, ":64-bit");
15058         return str;
15059 }
15060
15061 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15062 {
15063         struct pci_dev *peer;
15064         unsigned int func, devnr = tp->pdev->devfn & ~7;
15065
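        /* The low three bits of devfn hold the PCI function number, so
         * devnr is function 0 of this device.  Scan all eight functions
         * looking for the other port of a dual-port chip.
         */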
15066         for (func = 0; func < 8; func++) {
15067                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15068                 if (peer && peer != tp->pdev)
15069                         break;
15070                 pci_dev_put(peer);
15071         }
15072         /* The 5704 can be configured in single-port mode; in that
15073          * case there is no peer device, so point peer at tp->pdev.
15074          */
15075         if (!peer) {
15076                 peer = tp->pdev;
15077                 return peer;
15078         }
15079
15080         /*
15081          * We don't need to keep the refcount elevated; there's no way
15082          * to remove one half of this device without removing the other.
15083          */
15084         pci_dev_put(peer);
15085
15086         return peer;
15087 }
15088
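/* Set the default interrupt coalescing parameters that are later
 * reported to (and adjustable through) ethtool.
 */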
15089 static void __devinit tg3_init_coal(struct tg3 *tp)
15090 {
15091         struct ethtool_coalesce *ec = &tp->coal;
15092
15093         memset(ec, 0, sizeof(*ec));
15094         ec->cmd = ETHTOOL_GCOALESCE;
15095         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15096         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15097         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15098         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15099         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15100         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15101         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15102         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15103         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15104
15105         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15106                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15107                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15108                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15109                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15110                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15111         }
15112
15113         if (tg3_flag(tp, 5705_PLUS)) {
15114                 ec->rx_coalesce_usecs_irq = 0;
15115                 ec->tx_coalesce_usecs_irq = 0;
15116                 ec->stats_block_coalesce_usecs = 0;
15117         }
15118 }
15119
15120 static const struct net_device_ops tg3_netdev_ops = {
15121         .ndo_open               = tg3_open,
15122         .ndo_stop               = tg3_close,
15123         .ndo_start_xmit         = tg3_start_xmit,
15124         .ndo_get_stats64        = tg3_get_stats64,
15125         .ndo_validate_addr      = eth_validate_addr,
15126         .ndo_set_multicast_list = tg3_set_rx_mode,
15127         .ndo_set_mac_address    = tg3_set_mac_addr,
15128         .ndo_do_ioctl           = tg3_ioctl,
15129         .ndo_tx_timeout         = tg3_tx_timeout,
15130         .ndo_change_mtu         = tg3_change_mtu,
15131         .ndo_fix_features       = tg3_fix_features,
15132         .ndo_set_features       = tg3_set_features,
15133 #ifdef CONFIG_NET_POLL_CONTROLLER
15134         .ndo_poll_controller    = tg3_poll_controller,
15135 #endif
15136 };
15137
15138 static int __devinit tg3_init_one(struct pci_dev *pdev,
15139                                   const struct pci_device_id *ent)
15140 {
15141         struct net_device *dev;
15142         struct tg3 *tp;
15143         int i, err, pm_cap;
15144         u32 sndmbx, rcvmbx, intmbx;
15145         char str[40];
15146         u64 dma_mask, persist_dma_mask;
15147         u32 features = 0;
15148
15149         printk_once(KERN_INFO "%s\n", version);
15150
15151         err = pci_enable_device(pdev);
15152         if (err) {
15153                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15154                 return err;
15155         }
15156
15157         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15158         if (err) {
15159                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15160                 goto err_out_disable_pdev;
15161         }
15162
15163         pci_set_master(pdev);
15164
15165         /* Find power-management capability. */
15166         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15167         if (pm_cap == 0) {
15168                 dev_err(&pdev->dev,
15169                         "Cannot find Power Management capability, aborting\n");
15170                 err = -EIO;
15171                 goto err_out_free_res;
15172         }
15173
15174         err = pci_set_power_state(pdev, PCI_D0);
15175         if (err) {
15176                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15177                 goto err_out_free_res;
15178         }
15179
15180         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15181         if (!dev) {
15182                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15183                 err = -ENOMEM;
15184                 goto err_out_power_down;
15185         }
15186
15187         SET_NETDEV_DEV(dev, &pdev->dev);
15188
15189         tp = netdev_priv(dev);
15190         tp->pdev = pdev;
15191         tp->dev = dev;
15192         tp->pm_cap = pm_cap;
15193         tp->rx_mode = TG3_DEF_RX_MODE;
15194         tp->tx_mode = TG3_DEF_TX_MODE;
15195
15196         if (tg3_debug > 0)
15197                 tp->msg_enable = tg3_debug;
15198         else
15199                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15200
15201         /* The word/byte swap controls here control register access byte
15202          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15203          * setting below.
15204          */
15205         tp->misc_host_ctrl =
15206                 MISC_HOST_CTRL_MASK_PCI_INT |
15207                 MISC_HOST_CTRL_WORD_SWAP |
15208                 MISC_HOST_CTRL_INDIR_ACCESS |
15209                 MISC_HOST_CTRL_PCISTATE_RW;
15210
15211         /* The NONFRM (non-frame) byte/word swap controls take effect
15212          * on descriptor entries, anything which isn't packet data.
15213          *
15214          * The StrongARM chips on the board (one for tx, one for rx)
15215          * are running in big-endian mode.
15216          */
15217         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15218                         GRC_MODE_WSWAP_NONFRM_DATA);
15219 #ifdef __BIG_ENDIAN
15220         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15221 #endif
15222         spin_lock_init(&tp->lock);
15223         spin_lock_init(&tp->indirect_lock);
15224         INIT_WORK(&tp->reset_task, tg3_reset_task);
15225
15226         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15227         if (!tp->regs) {
15228                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15229                 err = -ENOMEM;
15230                 goto err_out_free_dev;
15231         }
15232
15233         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15234             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15235             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15236             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15237             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15238             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15239             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15240             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15241                 tg3_flag_set(tp, ENABLE_APE);
15242                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15243                 if (!tp->aperegs) {
15244                         dev_err(&pdev->dev,
15245                                 "Cannot map APE registers, aborting\n");
15246                         err = -ENOMEM;
15247                         goto err_out_iounmap;
15248                 }
15249         }
15250
15251         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15252         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15253
15254         dev->ethtool_ops = &tg3_ethtool_ops;
15255         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15256         dev->netdev_ops = &tg3_netdev_ops;
15257         dev->irq = pdev->irq;
15258
15259         err = tg3_get_invariants(tp);
15260         if (err) {
15261                 dev_err(&pdev->dev,
15262                         "Problem fetching invariants of chip, aborting\n");
15263                 goto err_out_apeunmap;
15264         }
15265
15266         /* The EPB bridge inside 5714, 5715, and 5780 and any
15267          * device behind the EPB cannot support DMA addresses > 40-bit.
15268          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15269          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15270          * do DMA address check in tg3_start_xmit().
15271          */
15272         if (tg3_flag(tp, IS_5788))
15273                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15274         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15275                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15276 #ifdef CONFIG_HIGHMEM
15277                 dma_mask = DMA_BIT_MASK(64);
15278 #endif
15279         } else
15280                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15281
15282         /* Configure DMA attributes. */
15283         if (dma_mask > DMA_BIT_MASK(32)) {
15284                 err = pci_set_dma_mask(pdev, dma_mask);
15285                 if (!err) {
15286                         features |= NETIF_F_HIGHDMA;
15287                         err = pci_set_consistent_dma_mask(pdev,
15288                                                           persist_dma_mask);
15289                         if (err < 0) {
15290                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15291                                         "DMA for consistent allocations\n");
15292                                 goto err_out_apeunmap;
15293                         }
15294                 }
15295         }
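        /* Fall back to a 32-bit mask if the wider mask was rejected or
         * was never requested.
         */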
15296         if (err || dma_mask == DMA_BIT_MASK(32)) {
15297                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15298                 if (err) {
15299                         dev_err(&pdev->dev,
15300                                 "No usable DMA configuration, aborting\n");
15301                         goto err_out_apeunmap;
15302                 }
15303         }
15304
15305         tg3_init_bufmgr_config(tp);
15306
15307         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15308
15309         /* 5700 B0 chips do not support checksumming correctly due
15310          * to hardware bugs.
15311          */
15312         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15313                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15314
15315                 if (tg3_flag(tp, 5755_PLUS))
15316                         features |= NETIF_F_IPV6_CSUM;
15317         }
15318
15319         /* TSO is on by default on chips that support hardware TSO.
15320          * Firmware TSO on older chips gives lower performance, so it
15321          * is off by default, but can be enabled using ethtool.
15322          */
15323         if ((tg3_flag(tp, HW_TSO_1) ||
15324              tg3_flag(tp, HW_TSO_2) ||
15325              tg3_flag(tp, HW_TSO_3)) &&
15326             (features & NETIF_F_IP_CSUM))
15327                 features |= NETIF_F_TSO;
15328         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15329                 if (features & NETIF_F_IPV6_CSUM)
15330                         features |= NETIF_F_TSO6;
15331                 if (tg3_flag(tp, HW_TSO_3) ||
15332                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15333                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15334                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15335                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15336                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15337                         features |= NETIF_F_TSO_ECN;
15338         }
15339
15340         dev->features |= features;
15341         dev->vlan_features |= features;
15342
15343         /*
15344          * Add loopback capability only for a subset of devices that support
15345          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15346          * loopback for the remaining devices.
15347          */
15348         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15349             !tg3_flag(tp, CPMU_PRESENT))
15350                 /* Add the loopback capability */
15351                 features |= NETIF_F_LOOPBACK;
15352
15353         dev->hw_features |= features;
15354
15355         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15356             !tg3_flag(tp, TSO_CAPABLE) &&
15357             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15358                 tg3_flag_set(tp, MAX_RXPEND_64);
15359                 tp->rx_pending = 63;
15360         }
15361
15362         err = tg3_get_device_address(tp);
15363         if (err) {
15364                 dev_err(&pdev->dev,
15365                         "Could not obtain valid ethernet address, aborting\n");
15366                 goto err_out_apeunmap;
15367         }
15368
15369         /*
15370          * Reset the chip in case the UNDI or EFI driver did not shut it
15371          * down cleanly; otherwise the DMA self test will enable WDMAC
15372          * and we'll see (spurious) pending DMA on the PCI bus.
15373          */
15374         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15375             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15376                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15377                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15378         }
15379
15380         err = tg3_test_dma(tp);
15381         if (err) {
15382                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15383                 goto err_out_apeunmap;
15384         }
15385
15386         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15387         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15388         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15389         for (i = 0; i < tp->irq_max; i++) {
15390                 struct tg3_napi *tnapi = &tp->napi[i];
15391
15392                 tnapi->tp = tp;
15393                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15394
15395                 tnapi->int_mbox = intmbx;
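                /* The first four interrupt mailboxes are spaced 8 bytes
                 * apart; any additional vectors use a 4-byte stride.
                 */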
15396                 if (i < 4)
15397                         intmbx += 0x8;
15398                 else
15399                         intmbx += 0x4;
15400
15401                 tnapi->consmbox = rcvmbx;
15402                 tnapi->prodmbox = sndmbx;
15403
15404                 if (i)
15405                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15406                 else
15407                         tnapi->coal_now = HOSTCC_MODE_NOW;
15408
15409                 if (!tg3_flag(tp, SUPPORT_MSIX))
15410                         break;
15411
15412                 /*
15413                  * If we support MSIX, we'll be using RSS.  If we're using
15414                  * RSS, the first vector only handles link interrupts and the
15415                  * remaining vectors handle rx and tx interrupts.  Reuse the
15416          * mailbox values for the next iteration.  The values we set up
15417                  * above are still useful for the single vectored mode.
15418                  */
15419                 if (!i)
15420                         continue;
15421
15422                 rcvmbx += 0x8;
15423
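                /* The send producer mailboxes are not at a uniform
                 * stride; the +0xc/-0x4 zig-zag below alternates between
                 * the two 32-bit halves of consecutive 64-bit mailbox
                 * slots.
                 */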
15424                 if (sndmbx & 0x4)
15425                         sndmbx -= 0x4;
15426                 else
15427                         sndmbx += 0xc;
15428         }
15429
15430         tg3_init_coal(tp);
15431
15432         pci_set_drvdata(pdev, dev);
15433
15434         if (tg3_flag(tp, 5717_PLUS)) {
15435                 /* Resume from a low-power state. */
15436                 tg3_frob_aux_power(tp, false);
15437         }
15438
15439         err = register_netdev(dev);
15440         if (err) {
15441                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15442                 goto err_out_apeunmap;
15443         }
15444
15445         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15446                     tp->board_part_number,
15447                     tp->pci_chip_rev_id,
15448                     tg3_bus_string(tp, str),
15449                     dev->dev_addr);
15450
15451         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15452                 struct phy_device *phydev;
15453                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15454                 netdev_info(dev,
15455                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15456                             phydev->drv->name, dev_name(&phydev->dev));
15457         } else {
15458                 char *ethtype;
15459
15460                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15461                         ethtype = "10/100Base-TX";
15462                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15463                         ethtype = "1000Base-SX";
15464                 else
15465                         ethtype = "10/100/1000Base-T";
15466
15467                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15468                             "(WireSpeed[%d], EEE[%d])\n",
15469                             tg3_phy_string(tp), ethtype,
15470                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15471                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15472         }
15473
15474         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15475                     (dev->features & NETIF_F_RXCSUM) != 0,
15476                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15477                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15478                     tg3_flag(tp, ENABLE_ASF) != 0,
15479                     tg3_flag(tp, TSO_CAPABLE) != 0);
15480         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15481                     tp->dma_rwctrl,
15482                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15483                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15484
15485         pci_save_state(pdev);
15486
15487         return 0;
15488
15489 err_out_apeunmap:
15490         if (tp->aperegs) {
15491                 iounmap(tp->aperegs);
15492                 tp->aperegs = NULL;
15493         }
15494
15495 err_out_iounmap:
15496         if (tp->regs) {
15497                 iounmap(tp->regs);
15498                 tp->regs = NULL;
15499         }
15500
15501 err_out_free_dev:
15502         free_netdev(dev);
15503
15504 err_out_power_down:
15505         pci_set_power_state(pdev, PCI_D3hot);
15506
15507 err_out_free_res:
15508         pci_release_regions(pdev);
15509
15510 err_out_disable_pdev:
15511         pci_disable_device(pdev);
15512         pci_set_drvdata(pdev, NULL);
15513         return err;
15514 }
15515
15516 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15517 {
15518         struct net_device *dev = pci_get_drvdata(pdev);
15519
15520         if (dev) {
15521                 struct tg3 *tp = netdev_priv(dev);
15522
15523                 if (tp->fw)
15524                         release_firmware(tp->fw);
15525
15526                 cancel_work_sync(&tp->reset_task);
15527
15528                 if (!tg3_flag(tp, USE_PHYLIB)) {
15529                         tg3_phy_fini(tp);
15530                         tg3_mdio_fini(tp);
15531                 }
15532
15533                 unregister_netdev(dev);
15534                 if (tp->aperegs) {
15535                         iounmap(tp->aperegs);
15536                         tp->aperegs = NULL;
15537                 }
15538                 if (tp->regs) {
15539                         iounmap(tp->regs);
15540                         tp->regs = NULL;
15541                 }
15542                 free_netdev(dev);
15543                 pci_release_regions(pdev);
15544                 pci_disable_device(pdev);
15545                 pci_set_drvdata(pdev, NULL);
15546         }
15547 }
15548
15549 #ifdef CONFIG_PM_SLEEP
15550 static int tg3_suspend(struct device *device)
15551 {
15552         struct pci_dev *pdev = to_pci_dev(device);
15553         struct net_device *dev = pci_get_drvdata(pdev);
15554         struct tg3 *tp = netdev_priv(dev);
15555         int err;
15556
15557         if (!netif_running(dev))
15558                 return 0;
15559
15560         flush_work_sync(&tp->reset_task);
15561         tg3_phy_stop(tp);
15562         tg3_netif_stop(tp);
15563
15564         del_timer_sync(&tp->timer);
15565
15566         tg3_full_lock(tp, 1);
15567         tg3_disable_ints(tp);
15568         tg3_full_unlock(tp);
15569
15570         netif_device_detach(dev);
15571
15572         tg3_full_lock(tp, 0);
15573         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15574         tg3_flag_clear(tp, INIT_COMPLETE);
15575         tg3_full_unlock(tp);
15576
15577         err = tg3_power_down_prepare(tp);
15578         if (err) {
15579                 int err2;
15580
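                /* Preparing for power-down failed; restart the hardware
                 * and reattach the device so it remains usable.
                 */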
15581                 tg3_full_lock(tp, 0);
15582
15583                 tg3_flag_set(tp, INIT_COMPLETE);
15584                 err2 = tg3_restart_hw(tp, 1);
15585                 if (err2)
15586                         goto out;
15587
15588                 tp->timer.expires = jiffies + tp->timer_offset;
15589                 add_timer(&tp->timer);
15590
15591                 netif_device_attach(dev);
15592                 tg3_netif_start(tp);
15593
15594 out:
15595                 tg3_full_unlock(tp);
15596
15597                 if (!err2)
15598                         tg3_phy_start(tp);
15599         }
15600
15601         return err;
15602 }
15603
15604 static int tg3_resume(struct device *device)
15605 {
15606         struct pci_dev *pdev = to_pci_dev(device);
15607         struct net_device *dev = pci_get_drvdata(pdev);
15608         struct tg3 *tp = netdev_priv(dev);
15609         int err;
15610
15611         if (!netif_running(dev))
15612                 return 0;
15613
15614         netif_device_attach(dev);
15615
15616         tg3_full_lock(tp, 0);
15617
15618         tg3_flag_set(tp, INIT_COMPLETE);
15619         err = tg3_restart_hw(tp, 1);
15620         if (err)
15621                 goto out;
15622
15623         tp->timer.expires = jiffies + tp->timer_offset;
15624         add_timer(&tp->timer);
15625
15626         tg3_netif_start(tp);
15627
15628 out:
15629         tg3_full_unlock(tp);
15630
15631         if (!err)
15632                 tg3_phy_start(tp);
15633
15634         return err;
15635 }
15636
15637 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15638 #define TG3_PM_OPS (&tg3_pm_ops)
15639
15640 #else
15641
15642 #define TG3_PM_OPS NULL
15643
15644 #endif /* CONFIG_PM_SLEEP */
15645
15646 /**
15647  * tg3_io_error_detected - called when PCI error is detected
15648  * @pdev: Pointer to PCI device
15649  * @state: The current pci connection state
15650  *
15651  * This function is called after a PCI bus error affecting
15652  * this device has been detected.
15653  */
15654 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15655                                               pci_channel_state_t state)
15656 {
15657         struct net_device *netdev = pci_get_drvdata(pdev);
15658         struct tg3 *tp = netdev_priv(netdev);
15659         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15660
15661         netdev_info(netdev, "PCI I/O error detected\n");
15662
15663         rtnl_lock();
15664
15665         if (!netif_running(netdev))
15666                 goto done;
15667
15668         tg3_phy_stop(tp);
15669
15670         tg3_netif_stop(tp);
15671
15672         del_timer_sync(&tp->timer);
15673         tg3_flag_clear(tp, RESTART_TIMER);
15674
15675         /* Want to make sure that the reset task doesn't run */
15676         cancel_work_sync(&tp->reset_task);
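        /* ...and drop any flags the reset task may have re-armed before
         * it was cancelled.
         */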
15677         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15678         tg3_flag_clear(tp, RESTART_TIMER);
15679
15680         netif_device_detach(netdev);
15681
15682         /* Clean up software state, even if MMIO is blocked */
15683         tg3_full_lock(tp, 0);
15684         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15685         tg3_full_unlock(tp);
15686
15687 done:
15688         if (state == pci_channel_io_perm_failure)
15689                 err = PCI_ERS_RESULT_DISCONNECT;
15690         else
15691                 pci_disable_device(pdev);
15692
15693         rtnl_unlock();
15694
15695         return err;
15696 }
15697
15698 /**
15699  * tg3_io_slot_reset - called after the pci bus has been reset.
15700  * @pdev: Pointer to PCI device
15701  *
15702  * Restart the card from scratch, as if from a cold-boot.
15703  * At this point, the card has experienced a hard reset,
15704  * followed by fixups by BIOS, and has its config space
15705  * set up identically to what it was at cold boot.
15706  */
15707 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15708 {
15709         struct net_device *netdev = pci_get_drvdata(pdev);
15710         struct tg3 *tp = netdev_priv(netdev);
15711         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15712         int err;
15713
15714         rtnl_lock();
15715
15716         if (pci_enable_device(pdev)) {
15717                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15718                 goto done;
15719         }
15720
15721         pci_set_master(pdev);
15722         pci_restore_state(pdev);
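        /* pci_restore_state() consumes the saved state, so save it
         * again in case another recovery pass needs to restore it.
         */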
15723         pci_save_state(pdev);
15724
15725         if (!netif_running(netdev)) {
15726                 rc = PCI_ERS_RESULT_RECOVERED;
15727                 goto done;
15728         }
15729
15730         err = tg3_power_up(tp);
15731         if (err)
15732                 goto done;
15733
15734         rc = PCI_ERS_RESULT_RECOVERED;
15735
15736 done:
15737         rtnl_unlock();
15738
15739         return rc;
15740 }
15741
15742 /**
15743  * tg3_io_resume - called when traffic can start flowing again.
15744  * @pdev: Pointer to PCI device
15745  *
15746  * This callback is called when the error recovery driver tells
15747  * us that it's OK to resume normal operation.
15748  */
15749 static void tg3_io_resume(struct pci_dev *pdev)
15750 {
15751         struct net_device *netdev = pci_get_drvdata(pdev);
15752         struct tg3 *tp = netdev_priv(netdev);
15753         int err;
15754
15755         rtnl_lock();
15756
15757         if (!netif_running(netdev))
15758                 goto done;
15759
15760         tg3_full_lock(tp, 0);
15761         tg3_flag_set(tp, INIT_COMPLETE);
15762         err = tg3_restart_hw(tp, 1);
15763         tg3_full_unlock(tp);
15764         if (err) {
15765                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15766                 goto done;
15767         }
15768
15769         netif_device_attach(netdev);
15770
15771         tp->timer.expires = jiffies + tp->timer_offset;
15772         add_timer(&tp->timer);
15773
15774         tg3_netif_start(tp);
15775
15776         tg3_phy_start(tp);
15777
15778 done:
15779         rtnl_unlock();
15780 }
15781
15782 static struct pci_error_handlers tg3_err_handler = {
15783         .error_detected = tg3_io_error_detected,
15784         .slot_reset     = tg3_io_slot_reset,
15785         .resume         = tg3_io_resume
15786 };
15787
15788 static struct pci_driver tg3_driver = {
15789         .name           = DRV_MODULE_NAME,
15790         .id_table       = tg3_pci_tbl,
15791         .probe          = tg3_init_one,
15792         .remove         = __devexit_p(tg3_remove_one),
15793         .err_handler    = &tg3_err_handler,
15794         .driver.pm      = TG3_PM_OPS,
15795 };
15796
15797 static int __init tg3_init(void)
15798 {
15799         return pci_register_driver(&tg3_driver);
15800 }
15801
15802 static void __exit tg3_cleanup(void)
15803 {
15804         pci_unregister_driver(&tg3_driver);
15805 }
15806
15807 module_init(tg3_init);
15808 module_exit(tg3_cleanup);