/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2014 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
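
/* Illustrative usage of the flag helpers (example, not original code): the
 * macros paste the TG3_FLAG_ prefix onto the short name at the call site,
 * so callers write e.g.
 *
 *      if (tg3_flag(tp, ENABLE_APE))
 *              tg3_flag_set(tp, APE_HAS_NCSI);
 */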

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 11, 2014"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
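
/* NEXT_TX is the '& (foo - 1)' form mentioned above: it equals
 * ((N) + 1) % TG3_TX_RING_SIZE only because TG3_TX_RING_SIZE (512) is a
 * power of two, e.g. NEXT_TX(511) == 512 & 511 == 0.
 */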

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)

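/* Basic register accessors: tg3_write32()/tg3_read32() go straight at the
 * device's primary MMIO BAR, while tg3_ape_write32()/tg3_ape_read32() hit
 * the separate APE register BAR (tp->aperegs).
 */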
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

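/* Registers can also be reached indirectly through a PCI config space
 * window (TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA); indirect_lock serializes
 * the two-step address/data sequence against other window users.
 */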
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

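/* Mailbox access in indirect mode: the two receive producer index mailboxes
 * have dedicated config-space aliases; everything else goes through the
 * register window at the mailbox offset plus 0x5600.
 */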
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

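/* NIC on-chip SRAM is reached through a movable memory window
 * (TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA), driven either via PCI
 * config space or via MMIO depending on the SRAM_USE_CONFIG flag.
 */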
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

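/* The APE (the NIC's management firmware engine) and the driver arbitrate
 * access to PHYs, GRC, GPIOs and shared memory through per-function
 * request/grant lock registers, managed by the helpers below.
 */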
static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

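/* Poll, under the MEM lock, until the APE has consumed any previously
 * posted event (EVENT_PENDING clear).  On success this returns 0 with the
 * MEM lock still held; the caller posts its event and then unlocks.
 */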
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

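/* Interrupt plumbing: each NAPI vector has its own interrupt mailbox.
 * Writing a nonzero value masks the vector, while writing last_tag << 24
 * unmasks it and acknowledges status block updates up to that tag.
 */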
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

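/* MII access goes through the MAC_MI_COM shuttle register: the frame (PHY
 * address, register, opcode, and data for writes) is composed, written, and
 * then polled until MI_COM_BUSY clears.  Hardware autopolling must be
 * paused around the transaction.
 */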
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

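/* Clause-45 style MMD registers are tunneled through the clause-22 MMD
 * control/address registers (MII_TG3_MMD_CTRL/MII_TG3_MMD_ADDRESS) using
 * the usual devad, address, then no-increment data sequence.
 */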
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
        return tg3_writephy(tp, MII_TG3_MISC_SHDW,
                            reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

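/* mii_bus read/write callbacks: thin wrappers that take tp->lock around the
 * internal PHY accessors so phylib can share the MI interface safely.
 */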
1374 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1375 {
1376         struct tg3 *tp = bp->priv;
1377         u32 val;
1378
1379         spin_lock_bh(&tp->lock);
1380
1381         if (__tg3_readphy(tp, mii_id, reg, &val))
1382                 val = -EIO;
1383
1384         spin_unlock_bh(&tp->lock);
1385
1386         return val;
1387 }
1388
1389 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1390 {
1391         struct tg3 *tp = bp->priv;
1392         u32 ret = 0;
1393
1394         spin_lock_bh(&tp->lock);
1395
1396         if (__tg3_writephy(tp, mii_id, reg, val))
1397                 ret = -EIO;
1398
1399         spin_unlock_bh(&tp->lock);
1400
1401         return ret;
1402 }
1403
static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

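        /* Select the PHY address to probe: 5717-plus parts derive it from
         * the PCI function number (offset by 7 when strapped for serdes),
         * SSB cores behind a roboswitch ask the switch driver, and
         * everything else uses the fixed TG3_PHY_MII_ADDR.
         */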
        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
                int addr;

                addr = ssb_gige_get_phyaddr(tp->pdev);
                if (addr < 0)
                        return addr;
                tp->phy_addr = addr;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
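        /* The poll loop below waits 8 usec per iteration, so convert the
         * remaining wait into a count of 8 usec steps, rounding up.
         */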
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(8);
        }
}

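/* Pack the PHY register pairs BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000
 * and PHYADDR into four 32-bit words (first register of each pair in the
 * upper halfword) for delivery through the firmware mailbox.
 */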
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
        u32 reg, val;

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        *data++ = val;

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        *data++ = val;

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        *data++ = val;

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        *data++ = val;
}

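/* Send a link-status update to the management firmware: wait for the
 * previous driver event to be acknowledged, write the command, length
 * and PHY snapshot into the SRAM mailbox, then raise the event bit.
 */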
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 data[4];

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_phy_gather_ump_data(tp, data);

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

        tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
        if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                /* Wait for RX cpu to ACK the previous event. */
                tg3_wait_for_event_ack(tp);

                tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

                tg3_generate_fw_event(tp);

                /* Wait for RX cpu to ACK this event. */
                tg3_wait_for_event_ack(tp);
        }
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
        tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
                      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START_DONE);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD_DONE);
                        break;

                default:
                        break;
                }
        }
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
        if (tg3_flag(tp, ENABLE_ASF)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }
}

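/* Wait for the firmware to finish booting after a reset.  Most chips
 * signal completion by writing the one's complement of the magic value
 * back to the firmware mailbox; the 5906 is polled via VCPU_STATUS
 * instead.
 */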
static int tg3_poll_fw(struct tg3 *tp)
{
        int i;
        u32 val;

        if (tg3_flag(tp, NO_FWARE_REPORTED))
                return 0;

        if (tg3_flag(tp, IS_SSB_CORE)) {
                /* We don't use firmware. */
                return 0;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* Wait up to 20ms for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
                        if (pci_channel_offline(tp->pdev))
                                return -ENODEV;

                        udelay(100);
                }
                return -ENODEV;
        }

        /* Wait for firmware initialization to complete. */
        for (i = 0; i < 100000; i++) {
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
                if (pci_channel_offline(tp->pdev)) {
                        if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
                                tg3_flag_set(tp, NO_FWARE_REPORTED);
                                netdev_info(tp->dev, "No firmware running\n");
                        }

                        break;
                }

                udelay(10);
        }

        /* Chip might not be fitted with firmware.  Some Sun onboard
         * parts are configured like that.  So don't signal the timeout
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
        if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
                tg3_flag_set(tp, NO_FWARE_REPORTED);

                netdev_info(tp->dev, "No firmware running\n");
        }

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
                /* The 57765 A0 needs a little more
                 * time to do some important work.
                 */
                mdelay(10);
        }

        return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }

        tp->link_up = netif_carrier_ok(tp->dev);
}

static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
        u32 flowctrl = 0;

        if (adv & ADVERTISE_PAUSE_CAP) {
                flowctrl |= FLOW_CTRL_RX;
                if (!(adv & ADVERTISE_PAUSE_ASYM))
                        flowctrl |= FLOW_CTRL_TX;
        } else if (adv & ADVERTISE_PAUSE_ASYM)
                flowctrl |= FLOW_CTRL_TX;

        return flowctrl;
}

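/* Translate the driver's FLOW_CTRL_TX/FLOW_CTRL_RX bits into the
 * 1000BASE-X pause advertisement encoding (symmetric and asymmetric
 * pause bits), and decode/resolve them again below.
 */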
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
        u32 flowctrl = 0;

        if (adv & ADVERTISE_1000XPAUSE) {
                flowctrl |= FLOW_CTRL_RX;
                if (!(adv & ADVERTISE_1000XPSE_ASYM))
                        flowctrl |= FLOW_CTRL_TX;
        } else if (adv & ADVERTISE_1000XPSE_ASYM)
                flowctrl |= FLOW_CTRL_TX;

        return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
        } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
                if (lcladv & ADVERTISE_1000XPAUSE)
                        cap = FLOW_CTRL_RX;
                if (rmtadv & ADVERTISE_1000XPAUSE)
                        cap = FLOW_CTRL_TX;
        }

        return cap;
}

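/* Resolve the active flow-control setting (the autonegotiated result
 * when pause autoneg is on, the forced configuration otherwise) and
 * program the MAC RX/TX flow-control enables to match.
 */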
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tg3_flag(tp, USE_PHYLIB))
                autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}

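/* phylib adjust_link callback: mirror the PHY's negotiated speed, duplex
 * and pause state into the MAC mode, MI status and TX length registers,
 * and emit a link report when anything changed.
 */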
static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

        spin_lock_bh(&tp->lock);

        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else if (phydev->speed == SPEED_1000 ||
                         tg3_asic_rev(tp) != ASIC_REV_5785)
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_MII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        lcl_adv = mii_advertise_flowctrl(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5785) {
                if (phydev->speed == SPEED_10)
                        tw32(MAC_MI_STAT,
                             MAC_MI_STAT_10MBPS_MODE |
                             MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
                else
                        tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
        }

        if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        if (phydev->link != tp->old_link ||
            phydev->speed != tp->link_config.active_speed ||
            phydev->duplex != tp->link_config.active_duplex ||
            oldflowctrl != tp->link_config.active_flowctrl)
                linkmesg = 1;

        tp->old_link = phydev->link;
        tp->link_config.active_speed = phydev->speed;
        tp->link_config.active_duplex = phydev->duplex;

        spin_unlock_bh(&tp->lock);

        if (linkmesg)
                tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
                return 0;

        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);

        phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, phydev_name(phydev),
                             tg3_adjust_link, phydev->interface);
        if (IS_ERR(phydev)) {
                dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
                return PTR_ERR(phydev);
        }

        /* Mask with MAC supported features. */
        switch (phydev->interface) {
        case PHY_INTERFACE_MODE_GMII:
        case PHY_INTERFACE_MODE_RGMII:
                if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                        phydev->supported &= (PHY_GBIT_FEATURES |
                                              SUPPORTED_Pause |
                                              SUPPORTED_Asym_Pause);
                        break;
                }
                /* fallthru */
        case PHY_INTERFACE_MODE_MII:
                phydev->supported &= (PHY_BASIC_FEATURES |
                                      SUPPORTED_Pause |
                                      SUPPORTED_Asym_Pause);
                break;
        default:
                phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
                return -EINVAL;
        }

        tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

        phydev->advertising = phydev->supported;

        phy_attached_info(phydev);

        return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;

        phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
                phydev->speed = tp->link_config.speed;
                phydev->duplex = tp->link_config.duplex;
                phydev->autoneg = tp->link_config.autoneg;
                phydev->advertising = tp->link_config.advertising;
        }

        phy_start(phydev);

        phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;

        phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}

static void tg3_phy_fini(struct tg3 *tp)
{
        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
                tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
        }
}

static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
        int err;
        u32 val;

        if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                return 0;

        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                err = tg3_phy_auxctl_write(tp,
                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                           MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
                                           0x4c20);
                goto done;
        }

        err = tg3_phy_auxctl_read(tp,
                                  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
        if (err)
                return err;

        val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
        err = tg3_phy_auxctl_write(tp,
                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
        return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
        u32 phytest;

        if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                u32 phy;

                tg3_writephy(tp, MII_TG3_FET_TEST,
                             phytest | MII_TG3_FET_SHADOW_EN);
                if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
                        if (enable)
                                phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
                        else
                                phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
                        tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
                }
                tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
        }
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
        u32 reg;

        if (!tg3_flag(tp, 5705_PLUS) ||
            (tg3_flag(tp, 5717_PLUS) &&
             (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
                return;

        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                tg3_phy_fet_toggle_apd(tp, enable);
                return;
        }

        reg = MII_TG3_MISC_SHDW_SCR5_LPED |
              MII_TG3_MISC_SHDW_SCR5_DLPTLM |
              MII_TG3_MISC_SHDW_SCR5_SDTL |
              MII_TG3_MISC_SHDW_SCR5_C125OE;
        if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
                reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

        tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

        reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
        if (enable)
                reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

        tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
        u32 phy;

        if (!tg3_flag(tp, 5705_PLUS) ||
            (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                return;

        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                u32 ephy;

                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
                        u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     ephy | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, reg, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                                tg3_writephy(tp, reg, phy);
                        }
                        tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
                }
        } else {
                int ret;

                ret = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
                if (!ret) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
                }
        }
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        int ret;
        u32 val;

        if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
                return;

        ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
        if (!ret)
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
                                     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

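/* Distribute the factory OTP calibration word into the PHY DSP taps.
 * Each masked field is shifted into place and written through the DSP
 * read/write port while the aux-control SMDSP is enabled.
 */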
static void tg3_phy_apply_otp(struct tg3 *tp)
{
        u32 otp, phy;

        if (!tp->phy_otp)
                return;

        otp = tp->phy_otp;

        if (tg3_phy_toggle_auxctl_smdsp(tp, true))
                return;

        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
        phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
        tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

        phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
              ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

        phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
        phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

        phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

        phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

        phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

        tg3_phy_toggle_auxctl_smdsp(tp, false);
}

static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
        u32 val;
        struct ethtool_eee *dest = &tp->eee;

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                return;

        if (eee)
                dest = eee;

        if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
                return;

        /* Pull eee_active */
        if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
            val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
                dest->eee_active = 1;
        } else
                dest->eee_active = 0;

        /* Pull lp advertised settings */
        if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
                return;
        dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

        /* Pull advertised and eee_enabled settings */
        if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
                return;
        dest->eee_enabled = !!val;
        dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

        /* Pull tx_lpi_enabled */
        val = tr32(TG3_CPMU_EEE_MODE);
        dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

        /* Pull lpi timer value */
        dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}

static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
        u32 val;

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                return;

        tp->setlpicnt = 0;

        if (tp->link_config.autoneg == AUTONEG_ENABLE &&
            current_link_up &&
            tp->link_config.active_duplex == DUPLEX_FULL &&
            (tp->link_config.active_speed == SPEED_100 ||
             tp->link_config.active_speed == SPEED_1000)) {
                u32 eeectl;

                if (tp->link_config.active_speed == SPEED_1000)
                        eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
                else
                        eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

                tw32(TG3_CPMU_EEE_CTRL, eeectl);

                tg3_eee_pull_config(tp, NULL);
                if (tp->eee.eee_active)
                        tp->setlpicnt = 2;
        }

        if (!tp->setlpicnt) {
                if (current_link_up &&
                   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
                        tg3_phy_toggle_auxctl_smdsp(tp, false);
                }

                val = tr32(TG3_CPMU_EEE_MODE);
                tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
        }
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
        u32 val;

        if (tp->link_config.active_speed == SPEED_1000 &&
            (tg3_asic_rev(tp) == ASIC_REV_5717 ||
             tg3_asic_rev(tp) == ASIC_REV_5719 ||
             tg3_flag(tp, 57765_CLASS)) &&
            !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                val = MII_TG3_DSP_TAP26_ALNOKO |
                      MII_TG3_DSP_TAP26_RMRXSTO;
                tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
                tg3_phy_toggle_auxctl_smdsp(tp, false);
        }

        val = tr32(TG3_CPMU_EEE_MODE);
        tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
        int limit = 100;

        while (limit--) {
                u32 tmp32;

                if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
                        if ((tmp32 & 0x1000) == 0)
                                break;
                }
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

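/* Write a known test pattern into each of the four DSP channels and read
 * it back.  A mismatch or macro timeout flags the caller to reset the
 * PHY and try again.
 */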
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}

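/* PHY workaround for the 5703/5704/5705: force 1000 Mbps full-duplex
 * master mode, verify the DSP channels with test patterns (resetting
 * the PHY and retrying up to ten times on failure), then restore the
 * original MII_CTRL1000 and MII_TG3_EXT_CTRL settings.
 */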
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 Mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_CTRL1000,
                             CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

                err = tg3_phy_toggle_auxctl_smdsp(tp, true);
                if (err)
                        return err;

                /* Block the PHY control access.  */
                tg3_phydsp_write(tp, 0x8005, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        tg3_phydsp_write(tp, 0x8005, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

        tg3_phy_toggle_auxctl_smdsp(tp, false);

        tg3_writephy(tp, MII_CTRL1000, phy9_orig);

        err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
        if (err)
                return err;

        reg32 &= ~0x3000;
        tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

        return 0;
}

static void tg3_carrier_off(struct tg3 *tp)
{
        netif_carrier_off(tp->dev);
        tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
        if (tg3_flag(tp, ENABLE_ASF))
                netdev_warn(tp->dev,
                            "Management side-band traffic will be interrupted during phy settings change\n");
}

/* Reset the tigon3 PHY and reapply the chip-specific setup and
 * workarounds that a reset wipes out.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 val, cpmuctrl;
        int err;

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        err  = tg3_readphy(tp, MII_BMSR, &val);
        err |= tg3_readphy(tp, MII_BMSR, &val);
        if (err != 0)
                return -EBUSY;

        if (netif_running(tp->dev) && tp->link_up) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
            tg3_asic_rev(tp) == ASIC_REV_5704 ||
            tg3_asic_rev(tp) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        cpmuctrl = 0;
        if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
            tg3_chip_rev(tp) != CHIPREV_5784_AX) {
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
                        tw32(TG3_CPMU_CTRL,
                             cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
                val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
                tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

                tw32(TG3_CPMU_CTRL, cpmuctrl);
        }

        if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
            tg3_chip_rev(tp) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }
        }

        if (tg3_flag(tp, 5717_PLUS) &&
            (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
                return 0;

        tg3_phy_apply_otp(tp);

        if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                tg3_phy_toggle_apd(tp, true);
        else
                tg3_phy_toggle_apd(tp, false);

out:
        if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
            !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                tg3_phydsp_write(tp, 0x201f, 0x2aaa);
                tg3_phydsp_write(tp, 0x000a, 0x0323);
                tg3_phy_toggle_auxctl_smdsp(tp, false);
        }

        if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
        }

        if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
                if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_phydsp_write(tp, 0x000a, 0x310b);
                        tg3_phydsp_write(tp, 0x201f, 0x9506);
                        tg3_phydsp_write(tp, 0x401f, 0x14e2);
                        tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
        } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
                if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                        if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                                tg3_writephy(tp, MII_TG3_TEST1,
                                             MII_TG3_TEST1_TRIM_EN | 0x4);
                        } else
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

                        tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
        }

        /* Set Extended packet length bit (bit 14) on all chips that
         * support jumbo frames.
         */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
        } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
                /* Set bit 14 with read-modify-write to preserve other bits */
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
                if (!err)
                        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                           val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tg3_flag(tp, JUMBO_CAPABLE)) {
                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
        }

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
                tg3_phydsp_write(tp, 0xffb, 0x4000);

        tg3_phy_toggle_automdix(tp, true);
        tg3_phy_set_wirespeed(tp);
        return 0;
}

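/* Each PCI function owns a 4-bit slot in the shared GPIO status word
 * (shift = 4 * function number).  The flags below advertise that a
 * driver is present and whether it needs the auxiliary power source.
 */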
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
        ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
         (TG3_GPIO_MSG_DRVR_PRES << 4) | \
         (TG3_GPIO_MSG_DRVR_PRES << 8) | \
         (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
        ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
         (TG3_GPIO_MSG_NEED_VAUX << 4) | \
         (TG3_GPIO_MSG_NEED_VAUX << 8) | \
         (TG3_GPIO_MSG_NEED_VAUX << 12))

static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
        u32 status, shift;

        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719)
                status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
        else
                status = tr32(TG3_CPMU_DRV_STATUS);

        shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
        status &= ~(TG3_GPIO_MSG_MASK << shift);
        status |= (newstat << shift);

        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719)
                tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
        else
                tw32(TG3_CPMU_DRV_STATUS, status);

        return status >> TG3_APE_GPIO_MSG_SHIFT;
}

static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
        if (!tg3_flag(tp, IS_NIC))
                return 0;

        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
                        return -EIO;

                tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
        } else {
                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        }

        return 0;
}

static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
        u32 grc_local_ctrl;

        if (!tg3_flag(tp, IS_NIC) ||
            tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701)
                return;

        grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);
}

static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
        if (!tg3_flag(tp, IS_NIC))
                return;

        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701) {
                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                            (GRC_LCLCTRL_GPIO_OE0 |
                             GRC_LCLCTRL_GPIO_OE1 |
                             GRC_LCLCTRL_GPIO_OE2 |
                             GRC_LCLCTRL_GPIO_OUTPUT0 |
                             GRC_LCLCTRL_GPIO_OUTPUT1),
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
                /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
                u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1 |
                                     tp->grc_local_ctrl;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        } else {
                u32 no_gpio2;
                u32 grc_local_ctrl = 0;

                /* Workaround to prevent overdrawing Amps. */
                if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    grc_local_ctrl,
                                    TG3_GRC_LCLCTL_PWRSW_DELAY);
                }

                /* On 5753 and variants, GPIO2 cannot be used. */
                no_gpio2 = tp->nic_sram_data_cfg &
                           NIC_SRAM_DATA_CFG_NO_GPIO2;

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                  GRC_LCLCTRL_GPIO_OE1 |
                                  GRC_LCLCTRL_GPIO_OE2 |
                                  GRC_LCLCTRL_GPIO_OUTPUT1 |
                                  GRC_LCLCTRL_GPIO_OUTPUT2;
                if (no_gpio2) {
                        grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                            GRC_LCLCTRL_GPIO_OUTPUT2);
                }
                tw32_wait_f(GRC_LOCAL_CTRL,
                            tp->grc_local_ctrl | grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                tw32_wait_f(GRC_LOCAL_CTRL,
                            tp->grc_local_ctrl | grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                if (!no_gpio2) {
                        grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                        tw32_wait_f(GRC_LOCAL_CTRL,
                                    tp->grc_local_ctrl | grc_local_ctrl,
                                    TG3_GRC_LCLCTL_PWRSW_DELAY);
                }
        }
}

static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
        u32 msg = 0;

        /* Serialize power state transitions */
        if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
                return;

        if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
                msg = TG3_GPIO_MSG_NEED_VAUX;

        msg = tg3_set_function_status(tp, msg);

        if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
                goto done;

        if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
                tg3_pwrsrc_switch_to_vaux(tp);
        else
                tg3_pwrsrc_die_with_vmain(tp);

done:
        tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}

2961 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2962 {
2963         bool need_vaux = false;
2964
2965         /* The GPIOs do something completely different on 57765. */
2966         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2967                 return;
2968
2969         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2970             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2971             tg3_asic_rev(tp) == ASIC_REV_5720) {
2972                 tg3_frob_aux_power_5717(tp, include_wol ?
2973                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2974                 return;
2975         }
2976
2977         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2978                 struct net_device *dev_peer;
2979
2980                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2981
2982                 /* remove_one() may have been run on the peer. */
2983                 if (dev_peer) {
2984                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2985
2986                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2987                                 return;
2988
2989                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2990                             tg3_flag(tp_peer, ENABLE_ASF))
2991                                 need_vaux = true;
2992                 }
2993         }
2994
2995         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2996             tg3_flag(tp, ENABLE_ASF))
2997                 need_vaux = true;
2998
2999         if (need_vaux)
3000                 tg3_pwrsrc_switch_to_vaux(tp);
3001         else
3002                 tg3_pwrsrc_die_with_vmain(tp);
3003 }
3004
3005 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3006 {
3007         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3008                 return 1;
3009         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3010                 if (speed != SPEED_10)
3011                         return 1;
3012         } else if (speed == SPEED_10)
3013                 return 1;
3014
3015         return 0;
3016 }
3017
3018 static bool tg3_phy_power_bug(struct tg3 *tp)
3019 {
3020         switch (tg3_asic_rev(tp)) {
3021         case ASIC_REV_5700:
3022         case ASIC_REV_5704:
3023                 return true;
3024         case ASIC_REV_5780:
3025                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3026                         return true;
3027                 return false;
3028         case ASIC_REV_5717:
3029                 if (!tp->pci_fn)
3030                         return true;
3031                 return false;
3032         case ASIC_REV_5719:
3033         case ASIC_REV_5720:
3034                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3035                     !tp->pci_fn)
3036                         return true;
3037                 return false;
3038         }
3039
3040         return false;
3041 }
3042
3043 static bool tg3_phy_led_bug(struct tg3 *tp)
3044 {
3045         switch (tg3_asic_rev(tp)) {
3046         case ASIC_REV_5719:
3047         case ASIC_REV_5720:
3048                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3049                     !tp->pci_fn)
3050                         return true;
3051                 return false;
3052         }
3053
3054         return false;
3055 }
3056
3057 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3058 {
3059         u32 val;
3060
3061         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3062                 return;
3063
3064         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3065                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3066                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3067                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3068
3069                         sg_dig_ctrl |=
3070                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3071                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3072                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3073                 }
3074                 return;
3075         }
3076
3077         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3078                 tg3_bmcr_reset(tp);
3079                 val = tr32(GRC_MISC_CFG);
3080                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3081                 udelay(40);
3082                 return;
3083         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3084                 u32 phytest;
3085                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3086                         u32 phy;
3087
3088                         tg3_writephy(tp, MII_ADVERTISE, 0);
3089                         tg3_writephy(tp, MII_BMCR,
3090                                      BMCR_ANENABLE | BMCR_ANRESTART);
3091
3092                         tg3_writephy(tp, MII_TG3_FET_TEST,
3093                                      phytest | MII_TG3_FET_SHADOW_EN);
3094                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3095                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3096                                 tg3_writephy(tp,
3097                                              MII_TG3_FET_SHDW_AUXMODE4,
3098                                              phy);
3099                         }
3100                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3101                 }
3102                 return;
3103         } else if (do_low_power) {
3104                 if (!tg3_phy_led_bug(tp))
3105                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3106                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3107
3108                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3109                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3110                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3111                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3112         }
3113
3114         /* On some chips the PHY must not be powered down at all
3115          * because of hardware bugs (see tg3_phy_power_bug()).
3116          */
3117         if (tg3_phy_power_bug(tp))
3118                 return;
3119
3120         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3121             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3122                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3123                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3124                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3125                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3126         }
3127
3128         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3129 }
3130
3131 /* tp->lock is held. */
3132 static int tg3_nvram_lock(struct tg3 *tp)
3133 {
3134         if (tg3_flag(tp, NVRAM)) {
3135                 int i;
3136
3137                 if (tp->nvram_lock_cnt == 0) {
3138                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3139                         for (i = 0; i < 8000; i++) {
3140                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3141                                         break;
3142                                 udelay(20);
3143                         }
3144                         if (i == 8000) {
3145                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3146                                 return -ENODEV;
3147                         }
3148                 }
3149                 tp->nvram_lock_cnt++;
3150         }
3151         return 0;
3152 }
3153
3154 /* tp->lock is held. */
3155 static void tg3_nvram_unlock(struct tg3 *tp)
3156 {
3157         if (tg3_flag(tp, NVRAM)) {
3158                 if (tp->nvram_lock_cnt > 0)
3159                         tp->nvram_lock_cnt--;
3160                 if (tp->nvram_lock_cnt == 0)
3161                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3162         }
3163 }
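
/* For illustration of the pairing above: NVRAM users take the arbitration
 * lock, enable host access, perform the transfer, and then unwind in the
 * reverse order, exactly as tg3_nvram_read() does further below.
 */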
3164
3165 /* tp->lock is held. */
3166 static void tg3_enable_nvram_access(struct tg3 *tp)
3167 {
3168         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3169                 u32 nvaccess = tr32(NVRAM_ACCESS);
3170
3171                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3172         }
3173 }
3174
3175 /* tp->lock is held. */
3176 static void tg3_disable_nvram_access(struct tg3 *tp)
3177 {
3178         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3179                 u32 nvaccess = tr32(NVRAM_ACCESS);
3180
3181                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3182         }
3183 }
3184
3185 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3186                                         u32 offset, u32 *val)
3187 {
3188         u32 tmp;
3189         int i;
3190
3191         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3192                 return -EINVAL;
3193
3194         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3195                                         EEPROM_ADDR_DEVID_MASK |
3196                                         EEPROM_ADDR_READ);
3197         tw32(GRC_EEPROM_ADDR,
3198              tmp |
3199              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3200              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3201               EEPROM_ADDR_ADDR_MASK) |
3202              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3203
3204         for (i = 0; i < 1000; i++) {
3205                 tmp = tr32(GRC_EEPROM_ADDR);
3206
3207                 if (tmp & EEPROM_ADDR_COMPLETE)
3208                         break;
3209                 msleep(1);
3210         }
3211         if (!(tmp & EEPROM_ADDR_COMPLETE))
3212                 return -EBUSY;
3213
3214         tmp = tr32(GRC_EEPROM_DATA);
3215
3216         /*
3217          * The data will always be opposite the native endian
3218          * format.  Perform a blind byteswap to compensate.
3219          */
3220         *val = swab32(tmp);
3221
3222         return 0;
3223 }
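
/* For illustration of the blind byteswap above: swab32() reverses all four
 * bytes, so a GRC_EEPROM_DATA reading of 0x12345678 is stored as
 * 0x78563412; because the register is always opposite the native byte
 * order, the same swap is correct on both little- and big-endian hosts.
 */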
3224
3225 #define NVRAM_CMD_TIMEOUT 5000
3226
3227 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3228 {
3229         int i;
3230
3231         tw32(NVRAM_CMD, nvram_cmd);
3232         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3233                 usleep_range(10, 40);
3234                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3235                         udelay(10);
3236                         break;
3237                 }
3238         }
3239
3240         if (i == NVRAM_CMD_TIMEOUT)
3241                 return -EBUSY;
3242
3243         return 0;
3244 }
3245
3246 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3247 {
3248         if (tg3_flag(tp, NVRAM) &&
3249             tg3_flag(tp, NVRAM_BUFFERED) &&
3250             tg3_flag(tp, FLASH) &&
3251             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3252             (tp->nvram_jedecnum == JEDEC_ATMEL))
3253
3254                 addr = ((addr / tp->nvram_pagesize) <<
3255                         ATMEL_AT45DB0X1B_PAGE_POS) +
3256                        (addr % tp->nvram_pagesize);
3257
3258         return addr;
3259 }
3260
3261 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3262 {
3263         if (tg3_flag(tp, NVRAM) &&
3264             tg3_flag(tp, NVRAM_BUFFERED) &&
3265             tg3_flag(tp, FLASH) &&
3266             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3267             (tp->nvram_jedecnum == JEDEC_ATMEL))
3268
3269                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3270                         tp->nvram_pagesize) +
3271                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3272
3273         return addr;
3274 }
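
/* Worked example for the two Atmel translations above, assuming the usual
 * AT45DB0x1B geometry of 264-byte pages with ATMEL_AT45DB0X1B_PAGE_POS = 9:
 * logical address 1000 lies in page 1000 / 264 = 3 at offset 1000 % 264 =
 * 208, giving physical address (3 << 9) + 208 = 1744.  Feeding 1744 back
 * through tg3_nvram_logical_addr() yields (1744 >> 9) * 264 + (1744 & 511)
 * = 3 * 264 + 208 = 1000, so the helpers are exact inverses.
 */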
3275
3276 /* NOTE: Data read in from NVRAM is byteswapped according to
3277  * the byteswapping settings for all other register accesses.
3278  * tg3 devices are BE devices, so on a BE machine, the data
3279  * returned will be exactly as it is seen in NVRAM.  On a LE
3280  * machine, the 32-bit value will be byteswapped.
3281  */
3282 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3283 {
3284         int ret;
3285
3286         if (!tg3_flag(tp, NVRAM))
3287                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3288
3289         offset = tg3_nvram_phys_addr(tp, offset);
3290
3291         if (offset > NVRAM_ADDR_MSK)
3292                 return -EINVAL;
3293
3294         ret = tg3_nvram_lock(tp);
3295         if (ret)
3296                 return ret;
3297
3298         tg3_enable_nvram_access(tp);
3299
3300         tw32(NVRAM_ADDR, offset);
3301         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3302                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3303
3304         if (ret == 0)
3305                 *val = tr32(NVRAM_RDDATA);
3306
3307         tg3_disable_nvram_access(tp);
3308
3309         tg3_nvram_unlock(tp);
3310
3311         return ret;
3312 }
3313
3314 /* Ensures NVRAM data is in bytestream format. */
3315 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3316 {
3317         u32 v;
3318         int res = tg3_nvram_read(tp, offset, &v);
3319         if (!res)
3320                 *val = cpu_to_be32(v);
3321         return res;
3322 }
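
#if 0	/* Illustrative sketch, not built: how a caller might pull a
	 * dword-aligned byte-stream buffer out of NVRAM with
	 * tg3_nvram_read_be32().  The function name is hypothetical; the
	 * driver's own callers follow this same pattern.
	 */
static int tg3_example_nvram_read_buf(struct tg3 *tp, u32 offset,
				      u8 *buf, u32 len)
{
	u32 i;

	for (i = 0; i < len; i += 4) {
		int err = tg3_nvram_read_be32(tp, offset + i,
					      (__be32 *)(buf + i));
		if (err)
			return err;
	}
	return 0;
}
#endif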
3323
3324 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3325                                     u32 offset, u32 len, u8 *buf)
3326 {
3327         int i, j, rc = 0;
3328         u32 val;
3329
3330         for (i = 0; i < len; i += 4) {
3331                 u32 addr;
3332                 __be32 data;
3333
3334                 addr = offset + i;
3335
3336                 memcpy(&data, buf + i, 4);
3337
3338                 /*
3339                  * The SEEPROM interface expects the data to always be opposite
3340                  * the native endian format.  We accomplish this by reversing
3341                  * all the operations that would have been performed on the
3342                  * data from a call to tg3_nvram_read_be32().
3343                  */
3344                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3345
3346                 val = tr32(GRC_EEPROM_ADDR);
3347                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3348
3349                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3350                         EEPROM_ADDR_READ);
3351                 tw32(GRC_EEPROM_ADDR, val |
3352                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3353                         (addr & EEPROM_ADDR_ADDR_MASK) |
3354                         EEPROM_ADDR_START |
3355                         EEPROM_ADDR_WRITE);
3356
3357                 for (j = 0; j < 1000; j++) {
3358                         val = tr32(GRC_EEPROM_ADDR);
3359
3360                         if (val & EEPROM_ADDR_COMPLETE)
3361                                 break;
3362                         msleep(1);
3363                 }
3364                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3365                         rc = -EBUSY;
3366                         break;
3367                 }
3368         }
3369
3370         return rc;
3371 }
3372
3373 /* offset and length are dword aligned */
3374 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3375                 u8 *buf)
3376 {
3377         int ret = 0;
3378         u32 pagesize = tp->nvram_pagesize;
3379         u32 pagemask = pagesize - 1;
3380         u32 nvram_cmd;
3381         u8 *tmp;
3382
3383         tmp = kmalloc(pagesize, GFP_KERNEL);
3384         if (tmp == NULL)
3385                 return -ENOMEM;
3386
3387         while (len) {
3388                 int j;
3389                 u32 phy_addr, page_off, size;
3390
3391                 phy_addr = offset & ~pagemask;
3392
3393                 for (j = 0; j < pagesize; j += 4) {
3394                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3395                                                   (__be32 *) (tmp + j));
3396                         if (ret)
3397                                 break;
3398                 }
3399                 if (ret)
3400                         break;
3401
3402                 page_off = offset & pagemask;
3403                 size = pagesize;
3404                 if (len < size)
3405                         size = len;
3406
3407                 len -= size;
3408
3409                 memcpy(tmp + page_off, buf, size);
3410
3411                 offset = offset + (pagesize - page_off);
3412
3413                 tg3_enable_nvram_access(tp);
3414
3415                 /*
3416                  * Before we can erase the flash page, we need
3417                  * to issue a special "write enable" command.
3418                  */
3419                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3420
3421                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3422                         break;
3423
3424                 /* Erase the target page */
3425                 tw32(NVRAM_ADDR, phy_addr);
3426
3427                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3428                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3429
3430                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3431                         break;
3432
3433                 /* Issue another write enable to start the write. */
3434                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3435
3436                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3437                         break;
3438
3439                 for (j = 0; j < pagesize; j += 4) {
3440                         __be32 data;
3441
3442                         data = *((__be32 *) (tmp + j));
3443
3444                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3445
3446                         tw32(NVRAM_ADDR, phy_addr + j);
3447
3448                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3449                                 NVRAM_CMD_WR;
3450
3451                         if (j == 0)
3452                                 nvram_cmd |= NVRAM_CMD_FIRST;
3453                         else if (j == (pagesize - 4))
3454                                 nvram_cmd |= NVRAM_CMD_LAST;
3455
3456                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3457                         if (ret)
3458                                 break;
3459                 }
3460                 if (ret)
3461                         break;
3462         }
3463
3464         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3465         tg3_nvram_exec_cmd(tp, nvram_cmd);
3466
3467         kfree(tmp);
3468
3469         return ret;
3470 }
3471
3472 /* offset and length are dword aligned */
3473 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3474                 u8 *buf)
3475 {
3476         int i, ret = 0;
3477
3478         for (i = 0; i < len; i += 4, offset += 4) {
3479                 u32 page_off, phy_addr, nvram_cmd;
3480                 __be32 data;
3481
3482                 memcpy(&data, buf + i, 4);
3483                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3484
3485                 page_off = offset % tp->nvram_pagesize;
3486
3487                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3488
3489                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3490
3491                 if (page_off == 0 || i == 0)
3492                         nvram_cmd |= NVRAM_CMD_FIRST;
3493                 if (page_off == (tp->nvram_pagesize - 4))
3494                         nvram_cmd |= NVRAM_CMD_LAST;
3495
3496                 if (i == (len - 4))
3497                         nvram_cmd |= NVRAM_CMD_LAST;
3498
3499                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3500                     !tg3_flag(tp, FLASH) ||
3501                     !tg3_flag(tp, 57765_PLUS))
3502                         tw32(NVRAM_ADDR, phy_addr);
3503
3504                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3505                     !tg3_flag(tp, 5755_PLUS) &&
3506                     (tp->nvram_jedecnum == JEDEC_ST) &&
3507                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3508                         u32 cmd;
3509
3510                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3511                         ret = tg3_nvram_exec_cmd(tp, cmd);
3512                         if (ret)
3513                                 break;
3514                 }
3515                 if (!tg3_flag(tp, FLASH)) {
3516                         /* We always do complete word writes to eeprom. */
3517                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3518                 }
3519
3520                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3521                 if (ret)
3522                         break;
3523         }
3524         return ret;
3525 }
3526
3527 /* offset and length are dword aligned */
3528 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3529 {
3530         int ret;
3531
3532         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3533                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3534                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3535                 udelay(40);
3536         }
3537
3538         if (!tg3_flag(tp, NVRAM)) {
3539                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3540         } else {
3541                 u32 grc_mode;
3542
3543                 ret = tg3_nvram_lock(tp);
3544                 if (ret)
3545                         return ret;
3546
3547                 tg3_enable_nvram_access(tp);
3548                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3549                         tw32(NVRAM_WRITE1, 0x406);
3550
3551                 grc_mode = tr32(GRC_MODE);
3552                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3553
3554                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3555                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3556                                 buf);
3557                 } else {
3558                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3559                                 buf);
3560                 }
3561
3562                 grc_mode = tr32(GRC_MODE);
3563                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3564
3565                 tg3_disable_nvram_access(tp);
3566                 tg3_nvram_unlock(tp);
3567         }
3568
3569         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3570                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3571                 udelay(40);
3572         }
3573
3574         return ret;
3575 }
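
#if 0	/* Illustrative sketch, not built: writing one dword through
	 * tg3_nvram_write_block().  cpu_to_be32() puts the value into the
	 * same byte-stream order that tg3_nvram_read_be32() returns; the
	 * helper name is hypothetical and offset must be dword aligned.
	 */
static int tg3_example_nvram_write_u32(struct tg3 *tp, u32 offset, u32 val)
{
	__be32 data = cpu_to_be32(val);

	return tg3_nvram_write_block(tp, offset, sizeof(data), (u8 *)&data);
}
#endif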
3576
3577 #define RX_CPU_SCRATCH_BASE     0x30000
3578 #define RX_CPU_SCRATCH_SIZE     0x04000
3579 #define TX_CPU_SCRATCH_BASE     0x34000
3580 #define TX_CPU_SCRATCH_SIZE     0x04000
3581
3582 /* tp->lock is held. */
3583 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3584 {
3585         int i;
3586         const int iters = 10000;
3587
3588         for (i = 0; i < iters; i++) {
3589                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3590                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3591                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3592                         break;
3593                 if (pci_channel_offline(tp->pdev))
3594                         return -EBUSY;
3595         }
3596
3597         return (i == iters) ? -EBUSY : 0;
3598 }
3599
3600 /* tp->lock is held. */
3601 static int tg3_rxcpu_pause(struct tg3 *tp)
3602 {
3603         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3604
3605         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3606         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3607         udelay(10);
3608
3609         return rc;
3610 }
3611
3612 /* tp->lock is held. */
3613 static int tg3_txcpu_pause(struct tg3 *tp)
3614 {
3615         return tg3_pause_cpu(tp, TX_CPU_BASE);
3616 }
3617
3618 /* tp->lock is held. */
3619 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3620 {
3621         tw32(cpu_base + CPU_STATE, 0xffffffff);
3622         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3623 }
3624
3625 /* tp->lock is held. */
3626 static void tg3_rxcpu_resume(struct tg3 *tp)
3627 {
3628         tg3_resume_cpu(tp, RX_CPU_BASE);
3629 }
3630
3631 /* tp->lock is held. */
3632 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3633 {
3634         int rc;
3635
3636         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3637
3638         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3639                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3640
3641                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3642                 return 0;
3643         }
3644         if (cpu_base == RX_CPU_BASE) {
3645                 rc = tg3_rxcpu_pause(tp);
3646         } else {
3647                 /*
3648                  * There is only an Rx CPU for the 5750 derivative in the
3649                  * BCM4785.
3650                  */
3651                 if (tg3_flag(tp, IS_SSB_CORE))
3652                         return 0;
3653
3654                 rc = tg3_txcpu_pause(tp);
3655         }
3656
3657         if (rc) {
3658                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3659                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3660                 return -ENODEV;
3661         }
3662
3663         /* Clear firmware's nvram arbitration. */
3664         if (tg3_flag(tp, NVRAM))
3665                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3666         return 0;
3667 }
3668
3669 static int tg3_fw_data_len(struct tg3 *tp,
3670                            const struct tg3_firmware_hdr *fw_hdr)
3671 {
3672         int fw_len;
3673
3674         /* Non-fragmented firmware has one firmware header followed by a
3675          * contiguous chunk of data to be written. The length field in that
3676          * header is not the length of the data to be written but the complete
3677          * length of the bss. The data length is determined from
3678          * tp->fw->size minus the headers.
3679          *
3680          * Fragmented firmware has a main header followed by multiple
3681          * fragments. Each fragment is identical to non-fragmented firmware,
3682          * with a firmware header followed by a contiguous chunk of data. In
3683          * the main header, the length field is unused and set to 0xffffffff.
3684          * In each fragment header, the length is the entire size of that
3685          * fragment, i.e. fragment data + header length. The data length is
3686          * therefore the length field in the header minus TG3_FW_HDR_LEN.
3687          */
3688         if (tp->fw_len == 0xffffffff)
3689                 fw_len = be32_to_cpu(fw_hdr->len);
3690         else
3691                 fw_len = tp->fw->size;
3692
3693         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3694 }
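
/* Worked example of the length math above, assuming TG3_FW_HDR_LEN is the
 * size of the three-word firmware header (12 bytes): a fragment whose
 * header reports len = 0x10c (268) carries (268 - 12) / 4 = 64 data words;
 * for non-fragmented images the same formula is applied to tp->fw->size.
 */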
3695
3696 /* tp->lock is held. */
3697 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3698                                  u32 cpu_scratch_base, int cpu_scratch_size,
3699                                  const struct tg3_firmware_hdr *fw_hdr)
3700 {
3701         int err, i;
3702         void (*write_op)(struct tg3 *, u32, u32);
3703         int total_len = tp->fw->size;
3704
3705         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3706                 netdev_err(tp->dev,
3707                            "%s: attempt to load TX CPU firmware on a 5705-plus chip\n",
3708                            __func__);
3709                 return -EINVAL;
3710         }
3711
3712         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3713                 write_op = tg3_write_mem;
3714         else
3715                 write_op = tg3_write_indirect_reg32;
3716
3717         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3718                 /* It is possible that bootcode is still loading at this point.
3719                  * Get the nvram lock first before halting the cpu.
3720                  */
3721                 int lock_err = tg3_nvram_lock(tp);
3722                 err = tg3_halt_cpu(tp, cpu_base);
3723                 if (!lock_err)
3724                         tg3_nvram_unlock(tp);
3725                 if (err)
3726                         goto out;
3727
3728                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3729                         write_op(tp, cpu_scratch_base + i, 0);
3730                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3731                 tw32(cpu_base + CPU_MODE,
3732                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3733         } else {
3734                 /* Subtract additional main header for fragmented firmware and
3735                  * advance to the first fragment.
3736                  */
3737                 total_len -= TG3_FW_HDR_LEN;
3738                 fw_hdr++;
3739         }
3740
3741         do {
3742                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3743                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3744                         write_op(tp, cpu_scratch_base +
3745                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3746                                      (i * sizeof(u32)),
3747                                  be32_to_cpu(fw_data[i]));
3748
3749                 total_len -= be32_to_cpu(fw_hdr->len);
3750
3751                 /* Advance to next fragment */
3752                 fw_hdr = (struct tg3_firmware_hdr *)
3753                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3754         } while (total_len > 0);
3755
3756         err = 0;
3757
3758 out:
3759         return err;
3760 }
3761
3762 /* tp->lock is held. */
3763 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3764 {
3765         int i;
3766         const int iters = 5;
3767
3768         tw32(cpu_base + CPU_STATE, 0xffffffff);
3769         tw32_f(cpu_base + CPU_PC, pc);
3770
3771         for (i = 0; i < iters; i++) {
3772                 if (tr32(cpu_base + CPU_PC) == pc)
3773                         break;
3774                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3775                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3776                 tw32_f(cpu_base + CPU_PC, pc);
3777                 udelay(1000);
3778         }
3779
3780         return (i == iters) ? -EBUSY : 0;
3781 }
3782
3783 /* tp->lock is held. */
3784 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3785 {
3786         const struct tg3_firmware_hdr *fw_hdr;
3787         int err;
3788
3789         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3790
3791         /* The firmware blob starts with version numbers, followed by
3792          * the start address and length.  The length is the complete
3793          * length: end_address_of_bss - start_address_of_text.  The
3794          * remainder is the blob to be loaded contiguously from the
3795          * start address. */
3796
3797         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3798                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3799                                     fw_hdr);
3800         if (err)
3801                 return err;
3802
3803         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3804                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3805                                     fw_hdr);
3806         if (err)
3807                 return err;
3808
3809         /* Now startup only the RX cpu. */
3810         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3811                                        be32_to_cpu(fw_hdr->base_addr));
3812         if (err) {
3813                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3814                            "should be %08x\n", __func__,
3815                            tr32(RX_CPU_BASE + CPU_PC),
3816                            be32_to_cpu(fw_hdr->base_addr));
3817                 return -ENODEV;
3818         }
3819
3820         tg3_rxcpu_resume(tp);
3821
3822         return 0;
3823 }
3824
3825 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3826 {
3827         const int iters = 1000;
3828         int i;
3829         u32 val;
3830
3831         /* Wait for boot code to complete initialization and enter service
3832          * loop. It is then safe to download service patches.
3833          */
3834         for (i = 0; i < iters; i++) {
3835                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3836                         break;
3837
3838                 udelay(10);
3839         }
3840
3841         if (i == iters) {
3842                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3843                 return -EBUSY;
3844         }
3845
3846         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3847         if (val & 0xff) {
3848                 netdev_warn(tp->dev,
3849                             "Other patches exist. Not downloading EEE patch\n");
3850                 return -EEXIST;
3851         }
3852
3853         return 0;
3854 }
3855
3856 /* tp->lock is held. */
3857 static void tg3_load_57766_firmware(struct tg3 *tp)
3858 {
3859         struct tg3_firmware_hdr *fw_hdr;
3860
3861         if (!tg3_flag(tp, NO_NVRAM))
3862                 return;
3863
3864         if (tg3_validate_rxcpu_state(tp))
3865                 return;
3866
3867         if (!tp->fw)
3868                 return;
3869
3870         /* This firmware blob has a different format from older firmware
3871          * releases, as described below. The main difference is that the
3872          * data is fragmented and written to non-contiguous locations.
3873          *
3874          * The blob begins with a firmware header identical to other
3875          * firmware, consisting of version, base addr and length. The length
3876          * here is unused and set to 0xffffffff.
3877          *
3878          * This is followed by a series of firmware fragments, each
3879          * individually identical to older firmware, i.e. a firmware
3880          * header followed by the data for that fragment. The version
3881          * field of the individual fragment header is unused.
3882          */
3883
3884         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3885         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3886                 return;
3887
3888         if (tg3_rxcpu_pause(tp))
3889                 return;
3890
3891         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3892         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3893
3894         tg3_rxcpu_resume(tp);
3895 }
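
/* Illustrative layout of the fragmented 57766 blob described above, with
 * field names per the three-word tg3_firmware_hdr:
 *
 *	+------------------------------+
 *	| main hdr:  version           |
 *	|            base_addr         |
 *	|            len = 0xffffffff  |  <- unused in the main header
 *	+------------------------------+
 *	| frag hdr (len = hdr + data)  |
 *	| fragment data ...            |
 *	+------------------------------+
 *	| frag hdr (len = hdr + data)  |
 *	| fragment data ...            |
 *	+------------------------------+
 */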
3896
3897 /* tp->lock is held. */
3898 static int tg3_load_tso_firmware(struct tg3 *tp)
3899 {
3900         const struct tg3_firmware_hdr *fw_hdr;
3901         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3902         int err;
3903
3904         if (!tg3_flag(tp, FW_TSO))
3905                 return 0;
3906
3907         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3908
3909         /* The firmware blob starts with version numbers, followed by
3910          * the start address and length.  The length is the complete
3911          * length: end_address_of_bss - start_address_of_text.  The
3912          * remainder is the blob to be loaded contiguously from the
3913          * start address. */
3914
3915         cpu_scratch_size = tp->fw_len;
3916
3917         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3918                 cpu_base = RX_CPU_BASE;
3919                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3920         } else {
3921                 cpu_base = TX_CPU_BASE;
3922                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3923                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3924         }
3925
3926         err = tg3_load_firmware_cpu(tp, cpu_base,
3927                                     cpu_scratch_base, cpu_scratch_size,
3928                                     fw_hdr);
3929         if (err)
3930                 return err;
3931
3932         /* Now startup the cpu. */
3933         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3934                                        be32_to_cpu(fw_hdr->base_addr));
3935         if (err) {
3936                 netdev_err(tp->dev,
3937                            "%s fails to set CPU PC, is %08x should be %08x\n",
3938                            __func__, tr32(cpu_base + CPU_PC),
3939                            be32_to_cpu(fw_hdr->base_addr));
3940                 return -ENODEV;
3941         }
3942
3943         tg3_resume_cpu(tp, cpu_base);
3944         return 0;
3945 }
3946
3947 /* tp->lock is held. */
3948 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3949 {
3950         u32 addr_high, addr_low;
3951
3952         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3953         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3954                     (mac_addr[4] <<  8) | mac_addr[5]);
3955
3956         if (index < 4) {
3957                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3958                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3959         } else {
3960                 index -= 4;
3961                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3962                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3963         }
3964 }
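
/* For illustration: the address 00:11:22:33:44:55 is packed as
 * addr_high = 0x0011 (bytes 0-1) and addr_low = 0x22334455 (bytes 2-5);
 * indices 0-3 land in the MAC_ADDR_x register pairs and indices 4 and up
 * in the MAC_EXTADDR pairs.
 */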
3965
3966 /* tp->lock is held. */
3967 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3968 {
3969         u32 addr_high;
3970         int i;
3971
3972         for (i = 0; i < 4; i++) {
3973                 if (i == 1 && skip_mac_1)
3974                         continue;
3975                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3976         }
3977
3978         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3979             tg3_asic_rev(tp) == ASIC_REV_5704) {
3980                 for (i = 4; i < 16; i++)
3981                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3982         }
3983
3984         addr_high = (tp->dev->dev_addr[0] +
3985                      tp->dev->dev_addr[1] +
3986                      tp->dev->dev_addr[2] +
3987                      tp->dev->dev_addr[3] +
3988                      tp->dev->dev_addr[4] +
3989                      tp->dev->dev_addr[5]) &
3990                 TX_BACKOFF_SEED_MASK;
3991         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3992 }
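
/* For illustration: with the address 00:11:22:33:44:55 the byte sum is
 * 0x00 + 0x11 + 0x22 + 0x33 + 0x44 + 0x55 = 0xff, which (masked with
 * TX_BACKOFF_SEED_MASK) seeds the transmit backoff pseudo-random state.
 */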
3993
3994 static void tg3_enable_register_access(struct tg3 *tp)
3995 {
3996         /*
3997          * Make sure register accesses (indirect or otherwise) will function
3998          * correctly.
3999          */
4000         pci_write_config_dword(tp->pdev,
4001                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4002 }
4003
4004 static int tg3_power_up(struct tg3 *tp)
4005 {
4006         int err;
4007
4008         tg3_enable_register_access(tp);
4009
4010         err = pci_set_power_state(tp->pdev, PCI_D0);
4011         if (!err) {
4012                 /* Switch out of Vaux if it is a NIC */
4013                 tg3_pwrsrc_switch_to_vmain(tp);
4014         } else {
4015                 netdev_err(tp->dev, "Transition to D0 failed\n");
4016         }
4017
4018         return err;
4019 }
4020
4021 static int tg3_setup_phy(struct tg3 *, bool);
4022
4023 static int tg3_power_down_prepare(struct tg3 *tp)
4024 {
4025         u32 misc_host_ctrl;
4026         bool device_should_wake, do_low_power;
4027
4028         tg3_enable_register_access(tp);
4029
4030         /* Restore the CLKREQ setting. */
4031         if (tg3_flag(tp, CLKREQ_BUG))
4032                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4033                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4034
4035         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4036         tw32(TG3PCI_MISC_HOST_CTRL,
4037              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4038
4039         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4040                              tg3_flag(tp, WOL_ENABLE);
4041
4042         if (tg3_flag(tp, USE_PHYLIB)) {
4043                 do_low_power = false;
4044                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4045                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4046                         struct phy_device *phydev;
4047                         u32 phyid, advertising;
4048
4049                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4050
4051                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4052
4053                         tp->link_config.speed = phydev->speed;
4054                         tp->link_config.duplex = phydev->duplex;
4055                         tp->link_config.autoneg = phydev->autoneg;
4056                         tp->link_config.advertising = phydev->advertising;
4057
4058                         advertising = ADVERTISED_TP |
4059                                       ADVERTISED_Pause |
4060                                       ADVERTISED_Autoneg |
4061                                       ADVERTISED_10baseT_Half;
4062
4063                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4064                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4065                                         advertising |=
4066                                                 ADVERTISED_100baseT_Half |
4067                                                 ADVERTISED_100baseT_Full |
4068                                                 ADVERTISED_10baseT_Full;
4069                                 else
4070                                         advertising |= ADVERTISED_10baseT_Full;
4071                         }
4072
4073                         phydev->advertising = advertising;
4074
4075                         phy_start_aneg(phydev);
4076
4077                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4078                         if (phyid != PHY_ID_BCMAC131) {
4079                                 phyid &= PHY_BCM_OUI_MASK;
4080                                 if (phyid == PHY_BCM_OUI_1 ||
4081                                     phyid == PHY_BCM_OUI_2 ||
4082                                     phyid == PHY_BCM_OUI_3)
4083                                         do_low_power = true;
4084                         }
4085                 }
4086         } else {
4087                 do_low_power = true;
4088
4089                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4090                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4091
4092                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4093                         tg3_setup_phy(tp, false);
4094         }
4095
4096         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4097                 u32 val;
4098
4099                 val = tr32(GRC_VCPU_EXT_CTRL);
4100                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4101         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4102                 int i;
4103                 u32 val;
4104
4105                 for (i = 0; i < 200; i++) {
4106                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4107                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4108                                 break;
4109                         msleep(1);
4110                 }
4111         }
4112         if (tg3_flag(tp, WOL_CAP))
4113                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4114                                                      WOL_DRV_STATE_SHUTDOWN |
4115                                                      WOL_DRV_WOL |
4116                                                      WOL_SET_MAGIC_PKT);
4117
4118         if (device_should_wake) {
4119                 u32 mac_mode;
4120
4121                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4122                         if (do_low_power &&
4123                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4124                                 tg3_phy_auxctl_write(tp,
4125                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4126                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4127                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4128                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4129                                 udelay(40);
4130                         }
4131
4132                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4133                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4134                         else if (tp->phy_flags &
4135                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4136                                 if (tp->link_config.active_speed == SPEED_1000)
4137                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4138                                 else
4139                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4140                         } else
4141                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4142
4143                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4144                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4145                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4146                                              SPEED_100 : SPEED_10;
4147                                 if (tg3_5700_link_polarity(tp, speed))
4148                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4149                                 else
4150                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4151                         }
4152                 } else {
4153                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4154                 }
4155
4156                 if (!tg3_flag(tp, 5750_PLUS))
4157                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4158
4159                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4160                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4161                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4162                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4163
4164                 if (tg3_flag(tp, ENABLE_APE))
4165                         mac_mode |= MAC_MODE_APE_TX_EN |
4166                                     MAC_MODE_APE_RX_EN |
4167                                     MAC_MODE_TDE_ENABLE;
4168
4169                 tw32_f(MAC_MODE, mac_mode);
4170                 udelay(100);
4171
4172                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4173                 udelay(10);
4174         }
4175
4176         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4177             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4178              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4179                 u32 base_val;
4180
4181                 base_val = tp->pci_clock_ctrl;
4182                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4183                              CLOCK_CTRL_TXCLK_DISABLE);
4184
4185                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4186                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4187         } else if (tg3_flag(tp, 5780_CLASS) ||
4188                    tg3_flag(tp, CPMU_PRESENT) ||
4189                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4190                 /* do nothing */
4191         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4192                 u32 newbits1, newbits2;
4193
4194                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4195                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4196                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4197                                     CLOCK_CTRL_TXCLK_DISABLE |
4198                                     CLOCK_CTRL_ALTCLK);
4199                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4200                 } else if (tg3_flag(tp, 5705_PLUS)) {
4201                         newbits1 = CLOCK_CTRL_625_CORE;
4202                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4203                 } else {
4204                         newbits1 = CLOCK_CTRL_ALTCLK;
4205                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4206                 }
4207
4208                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4209                             40);
4210
4211                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4212                             40);
4213
4214                 if (!tg3_flag(tp, 5705_PLUS)) {
4215                         u32 newbits3;
4216
4217                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4218                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4219                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4220                                             CLOCK_CTRL_TXCLK_DISABLE |
4221                                             CLOCK_CTRL_44MHZ_CORE);
4222                         } else {
4223                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4224                         }
4225
4226                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4227                                     tp->pci_clock_ctrl | newbits3, 40);
4228                 }
4229         }
4230
4231         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
4232                 tg3_power_down_phy(tp, do_low_power);
4233
4234         tg3_frob_aux_power(tp, true);
4235
4236         /* Workaround for unstable PLL clock */
4237         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4238             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4239              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4240                 u32 val = tr32(0x7d00);
4241
4242                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4243                 tw32(0x7d00, val);
4244                 if (!tg3_flag(tp, ENABLE_ASF)) {
4245                         int err;
4246
4247                         err = tg3_nvram_lock(tp);
4248                         tg3_halt_cpu(tp, RX_CPU_BASE);
4249                         if (!err)
4250                                 tg3_nvram_unlock(tp);
4251                 }
4252         }
4253
4254         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4255
4256         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4257
4258         return 0;
4259 }
4260
4261 static void tg3_power_down(struct tg3 *tp)
4262 {
4263         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4264         pci_set_power_state(tp->pdev, PCI_D3hot);
4265 }
4266
4267 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4268 {
4269         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4270         case MII_TG3_AUX_STAT_10HALF:
4271                 *speed = SPEED_10;
4272                 *duplex = DUPLEX_HALF;
4273                 break;
4274
4275         case MII_TG3_AUX_STAT_10FULL:
4276                 *speed = SPEED_10;
4277                 *duplex = DUPLEX_FULL;
4278                 break;
4279
4280         case MII_TG3_AUX_STAT_100HALF:
4281                 *speed = SPEED_100;
4282                 *duplex = DUPLEX_HALF;
4283                 break;
4284
4285         case MII_TG3_AUX_STAT_100FULL:
4286                 *speed = SPEED_100;
4287                 *duplex = DUPLEX_FULL;
4288                 break;
4289
4290         case MII_TG3_AUX_STAT_1000HALF:
4291                 *speed = SPEED_1000;
4292                 *duplex = DUPLEX_HALF;
4293                 break;
4294
4295         case MII_TG3_AUX_STAT_1000FULL:
4296                 *speed = SPEED_1000;
4297                 *duplex = DUPLEX_FULL;
4298                 break;
4299
4300         default:
4301                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4302                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4303                                  SPEED_10;
4304                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4305                                   DUPLEX_HALF;
4306                         break;
4307                 }
4308                 *speed = SPEED_UNKNOWN;
4309                 *duplex = DUPLEX_UNKNOWN;
4310                 break;
4311         }
4312 }
4313
4314 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4315 {
4316         int err = 0;
4317         u32 val, new_adv;
4318
4319         new_adv = ADVERTISE_CSMA;
4320         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4321         new_adv |= mii_advertise_flowctrl(flowctrl);
4322
4323         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4324         if (err)
4325                 goto done;
4326
4327         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4328                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4329
4330                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4331                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4332                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4333
4334                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4335                 if (err)
4336                         goto done;
4337         }
4338
4339         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4340                 goto done;
4341
4342         tw32(TG3_CPMU_EEE_MODE,
4343              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4344
4345         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4346         if (!err) {
4347                 u32 err2;
4348
4349                 val = 0;
4350                 /* Advertise 100-BaseTX EEE ability */
4351                 if (advertise & ADVERTISED_100baseT_Full)
4352                         val |= MDIO_AN_EEE_ADV_100TX;
4353                 /* Advertise 1000-BaseT EEE ability */
4354                 if (advertise & ADVERTISED_1000baseT_Full)
4355                         val |= MDIO_AN_EEE_ADV_1000T;
4356
4357                 if (!tp->eee.eee_enabled) {
4358                         val = 0;
4359                         tp->eee.advertised = 0;
4360                 } else {
4361                         tp->eee.advertised = advertise &
4362                                              (ADVERTISED_100baseT_Full |
4363                                               ADVERTISED_1000baseT_Full);
4364                 }
4365
4366                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4367                 if (err)
4368                         val = 0;
4369
4370                 switch (tg3_asic_rev(tp)) {
4371                 case ASIC_REV_5717:
4372                 case ASIC_REV_57765:
4373                 case ASIC_REV_57766:
4374                 case ASIC_REV_5719:
4375                         /* If we advertised any EEE abilities above... */
4376                         if (val)
4377                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4378                                       MII_TG3_DSP_TAP26_RMRXSTO |
4379                                       MII_TG3_DSP_TAP26_OPCSINPT;
4380                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4381                         /* Fall through */
4382                 case ASIC_REV_5720:
4383                 case ASIC_REV_5762:
4384                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4385                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4386                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4387                 }
4388
4389                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4390                 if (!err)
4391                         err = err2;
4392         }
4393
4394 done:
4395         return err;
4396 }
4397
4398 static void tg3_phy_copper_begin(struct tg3 *tp)
4399 {
4400         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4401             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4402                 u32 adv, fc;
4403
4404                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4405                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4406                         adv = ADVERTISED_10baseT_Half |
4407                               ADVERTISED_10baseT_Full;
4408                         if (tg3_flag(tp, WOL_SPEED_100MB))
4409                                 adv |= ADVERTISED_100baseT_Half |
4410                                        ADVERTISED_100baseT_Full;
4411                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4412                                 if (!(tp->phy_flags &
4413                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4414                                         adv |= ADVERTISED_1000baseT_Half;
4415                                 adv |= ADVERTISED_1000baseT_Full;
4416                         }
4417
4418                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4419                 } else {
4420                         adv = tp->link_config.advertising;
4421                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4422                                 adv &= ~(ADVERTISED_1000baseT_Half |
4423                                          ADVERTISED_1000baseT_Full);
4424
4425                         fc = tp->link_config.flowctrl;
4426                 }
4427
4428                 tg3_phy_autoneg_cfg(tp, adv, fc);
4429
4430                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4431                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4432                         /* Normally during power down we want to autonegotiate
4433                          * the lowest possible speed for WOL. However, to avoid
4434                          * link flap, we leave it untouched.
4435                          */
4436                         return;
4437                 }
4438
4439                 tg3_writephy(tp, MII_BMCR,
4440                              BMCR_ANENABLE | BMCR_ANRESTART);
4441         } else {
4442                 int i;
4443                 u32 bmcr, orig_bmcr;
4444
4445                 tp->link_config.active_speed = tp->link_config.speed;
4446                 tp->link_config.active_duplex = tp->link_config.duplex;
4447
4448                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4449                         /* With autoneg disabled, 5715 only links up when the
4450                          * advertisement register has the configured speed
4451                          * enabled.
4452                          */
4453                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4454                 }
4455
4456                 bmcr = 0;
4457                 switch (tp->link_config.speed) {
4458                 default:
4459                 case SPEED_10:
4460                         break;
4461
4462                 case SPEED_100:
4463                         bmcr |= BMCR_SPEED100;
4464                         break;
4465
4466                 case SPEED_1000:
4467                         bmcr |= BMCR_SPEED1000;
4468                         break;
4469                 }
4470
4471                 if (tp->link_config.duplex == DUPLEX_FULL)
4472                         bmcr |= BMCR_FULLDPLX;
4473
4474                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4475                     (bmcr != orig_bmcr)) {
4476                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4477                         for (i = 0; i < 1500; i++) {
4478                                 u32 tmp;
4479
4480                                 udelay(10);
4481                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4482                                     tg3_readphy(tp, MII_BMSR, &tmp))
4483                                         continue;
4484                                 if (!(tmp & BMSR_LSTATUS)) {
4485                                         udelay(40);
4486                                         break;
4487                                 }
4488                         }
4489                         tg3_writephy(tp, MII_BMCR, bmcr);
4490                         udelay(40);
4491                 }
4492         }
4493 }
4494
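     /* Read the PHY's current BMCR/ADVERTISE/CTRL1000 state back into
      * tp->link_config, so that the driver's configuration matches
      * whatever is already programmed in the PHY.
      */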
4495 static int tg3_phy_pull_config(struct tg3 *tp)
4496 {
4497         int err;
4498         u32 val;
4499
4500         err = tg3_readphy(tp, MII_BMCR, &val);
4501         if (err)
4502                 goto done;
4503
4504         if (!(val & BMCR_ANENABLE)) {
4505                 tp->link_config.autoneg = AUTONEG_DISABLE;
4506                 tp->link_config.advertising = 0;
4507                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4508
4509                 err = -EIO;
4510
4511                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4512                 case 0:
4513                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4514                                 goto done;
4515
4516                         tp->link_config.speed = SPEED_10;
4517                         break;
4518                 case BMCR_SPEED100:
4519                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4520                                 goto done;
4521
4522                         tp->link_config.speed = SPEED_100;
4523                         break;
4524                 case BMCR_SPEED1000:
4525                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4526                                 tp->link_config.speed = SPEED_1000;
4527                                 break;
4528                         }
4529                         /* Fall through */
4530                 default:
4531                         goto done;
4532                 }
4533
4534                 if (val & BMCR_FULLDPLX)
4535                         tp->link_config.duplex = DUPLEX_FULL;
4536                 else
4537                         tp->link_config.duplex = DUPLEX_HALF;
4538
4539                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4540
4541                 err = 0;
4542                 goto done;
4543         }
4544
4545         tp->link_config.autoneg = AUTONEG_ENABLE;
4546         tp->link_config.advertising = ADVERTISED_Autoneg;
4547         tg3_flag_set(tp, PAUSE_AUTONEG);
4548
4549         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4550                 u32 adv;
4551
4552                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4553                 if (err)
4554                         goto done;
4555
4556                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4557                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4558
4559                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4560         } else {
4561                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4562         }
4563
4564         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4565                 u32 adv;
4566
4567                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4568                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4569                         if (err)
4570                                 goto done;
4571
4572                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4573                 } else {
4574                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4575                         if (err)
4576                                 goto done;
4577
4578                         adv = tg3_decode_flowctrl_1000X(val);
4579                         tp->link_config.flowctrl = adv;
4580
4581                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4582                         adv = mii_adv_to_ethtool_adv_x(val);
4583                 }
4584
4585                 tp->link_config.advertising |= adv;
4586         }
4587
4588 done:
4589         return err;
4590 }
4591
4592 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4593 {
4594         int err;
4595
4596         /* Turn off tap power management and set the
4597          * extended packet length bit. */
4598         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4599
4600         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4601         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4602         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4603         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4604         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4605
4606         udelay(40);
4607
4608         return err;
4609 }
4610
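     /* Check whether the EEE configuration programmed in the PHY still
      * matches what we want; a mismatch requires a PHY reset before the
      * new settings take effect.
      */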
4611 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4612 {
4613         struct ethtool_eee eee;
4614
4615         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4616                 return true;
4617
4618         tg3_eee_pull_config(tp, &eee);
4619
4620         if (tp->eee.eee_enabled) {
4621                 if (tp->eee.advertised != eee.advertised ||
4622                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4623                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4624                         return false;
4625         } else {
4626                 /* EEE is disabled but we're advertising */
4627                 if (eee.advertised)
4628                         return false;
4629         }
4630
4631         return true;
4632 }
4633
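     /* Verify that the PHY's advertisement registers (MII_ADVERTISE
      * and, on gigabit-capable PHYs, MII_CTRL1000) still match the
      * wanted advertisement.  The local advertisement is returned
      * through @lcladv.
      */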
4634 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4635 {
4636         u32 advmsk, tgtadv, advertising;
4637
4638         advertising = tp->link_config.advertising;
4639         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4640
4641         advmsk = ADVERTISE_ALL;
4642         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4643                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4644                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4645         }
4646
4647         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4648                 return false;
4649
4650         if ((*lcladv & advmsk) != tgtadv)
4651                 return false;
4652
4653         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4654                 u32 tg3_ctrl;
4655
4656                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4657
4658                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4659                         return false;
4660
4661                 if (tgtadv &&
4662                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4663                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4664                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4665                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4666                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4667                 } else {
4668                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4669                 }
4670
4671                 if (tg3_ctrl != tgtadv)
4672                         return false;
4673         }
4674
4675         return true;
4676 }
4677
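     /* Fetch the link partner's advertisement from MII_STAT1000 and
      * MII_LPA and record it, in ethtool form, in
      * tp->link_config.rmt_adv.
      */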
4678 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4679 {
4680         u32 lpeth = 0;
4681
4682         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4683                 u32 val;
4684
4685                 if (tg3_readphy(tp, MII_STAT1000, &val))
4686                         return false;
4687
4688                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4689         }
4690
4691         if (tg3_readphy(tp, MII_LPA, rmtadv))
4692                 return false;
4693
4694         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4695         tp->link_config.rmt_adv = lpeth;
4696
4697         return true;
4698 }
4699
4700 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4701 {
4702         if (curr_link_up != tp->link_up) {
4703                 if (curr_link_up) {
4704                         netif_carrier_on(tp->dev);
4705                 } else {
4706                         netif_carrier_off(tp->dev);
4707                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4708                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4709                 }
4710
4711                 tg3_link_report(tp);
4712                 return true;
4713         }
4714
4715         return false;
4716 }
4717
4718 static void tg3_clear_mac_status(struct tg3 *tp)
4719 {
4720         tw32(MAC_EVENT, 0);
4721
4722         tw32_f(MAC_STATUS,
4723                MAC_STATUS_SYNC_CHANGED |
4724                MAC_STATUS_CFG_CHANGED |
4725                MAC_STATUS_MI_COMPLETION |
4726                MAC_STATUS_LNKSTATE_CHANGED);
4727         udelay(40);
4728 }
4729
4730 static void tg3_setup_eee(struct tg3 *tp)
4731 {
4732         u32 val;
4733
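             /* Program the CPMU's EEE machinery: the link-idle
              * detection sources, the LPI exit timer, the EEE mode
              * enables and the debounce timers.
              */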
4734         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4735               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4736         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4737                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4738
4739         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4740
4741         tw32_f(TG3_CPMU_EEE_CTRL,
4742                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4743
4744         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4745               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4746               TG3_CPMU_EEEMD_LPI_IN_RX |
4747               TG3_CPMU_EEEMD_EEE_ENABLE;
4748
4749         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4750                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4751
4752         if (tg3_flag(tp, ENABLE_APE))
4753                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4754
4755         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4756
4757         tw32_f(TG3_CPMU_EEE_DBTMR1,
4758                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4759                (tp->eee.tx_lpi_timer & 0xffff));
4760
4761         tw32_f(TG3_CPMU_EEE_DBTMR2,
4762                TG3_CPMU_DBTMR2_APE_TX_2047US |
4763                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4764 }
4765
4766 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4767 {
4768         bool current_link_up;
4769         u32 bmsr, val;
4770         u32 lcl_adv, rmt_adv;
4771         u16 current_speed;
4772         u8 current_duplex;
4773         int i, err;
4774
4775         tg3_clear_mac_status(tp);
4776
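             /* Temporarily disable MDIO auto-polling so that the
              * direct PHY accesses below do not race with it.
              */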
4777         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4778                 tw32_f(MAC_MI_MODE,
4779                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4780                 udelay(80);
4781         }
4782
4783         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4784
4785         /* Some third-party PHYs need to be reset when the link
4786          * goes down.
4787          */
4788         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4789              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4790              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4791             tp->link_up) {
4792                 tg3_readphy(tp, MII_BMSR, &bmsr);
4793                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4794                     !(bmsr & BMSR_LSTATUS))
4795                         force_reset = true;
4796         }
4797         if (force_reset)
4798                 tg3_phy_reset(tp);
4799
4800         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4801                 tg3_readphy(tp, MII_BMSR, &bmsr);
4802                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4803                     !tg3_flag(tp, INIT_COMPLETE))
4804                         bmsr = 0;
4805
4806                 if (!(bmsr & BMSR_LSTATUS)) {
4807                         err = tg3_init_5401phy_dsp(tp);
4808                         if (err)
4809                                 return err;
4810
4811                         tg3_readphy(tp, MII_BMSR, &bmsr);
4812                         for (i = 0; i < 1000; i++) {
4813                                 udelay(10);
4814                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4815                                     (bmsr & BMSR_LSTATUS)) {
4816                                         udelay(40);
4817                                         break;
4818                                 }
4819                         }
4820
4821                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4822                             TG3_PHY_REV_BCM5401_B0 &&
4823                             !(bmsr & BMSR_LSTATUS) &&
4824                             tp->link_config.active_speed == SPEED_1000) {
4825                                 err = tg3_phy_reset(tp);
4826                                 if (!err)
4827                                         err = tg3_init_5401phy_dsp(tp);
4828                                 if (err)
4829                                         return err;
4830                         }
4831                 }
4832         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4833                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4834                 /* 5701 {A0,B0} CRC bug workaround */
4835                 tg3_writephy(tp, 0x15, 0x0a75);
4836                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4837                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4838                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4839         }
4840
4841         /* Clear pending interrupts... */
4842         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4843         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4844
4845         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4846                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4847         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4848                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4849
4850         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4851             tg3_asic_rev(tp) == ASIC_REV_5701) {
4852                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4853                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4854                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4855                 else
4856                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4857         }
4858
4859         current_link_up = false;
4860         current_speed = SPEED_UNKNOWN;
4861         current_duplex = DUPLEX_UNKNOWN;
4862         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4863         tp->link_config.rmt_adv = 0;
4864
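             /* PHYs on capacitively coupled links need bit 10 of the
              * MISC test shadow register set; if it was clear, set it
              * and redo the link setup.
              */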
4865         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4866                 err = tg3_phy_auxctl_read(tp,
4867                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4868                                           &val);
4869                 if (!err && !(val & (1 << 10))) {
4870                         tg3_phy_auxctl_write(tp,
4871                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4872                                              val | (1 << 10));
4873                         goto relink;
4874                 }
4875         }
4876
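             /* BMSR latches link-down events, so read it twice; the
              * second read reflects the current link state.
              */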
4877         bmsr = 0;
4878         for (i = 0; i < 100; i++) {
4879                 tg3_readphy(tp, MII_BMSR, &bmsr);
4880                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4881                     (bmsr & BMSR_LSTATUS))
4882                         break;
4883                 udelay(40);
4884         }
4885
4886         if (bmsr & BMSR_LSTATUS) {
4887                 u32 aux_stat, bmcr;
4888
4889                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4890                 for (i = 0; i < 2000; i++) {
4891                         udelay(10);
4892                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4893                             aux_stat)
4894                                 break;
4895                 }
4896
4897                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4898                                              &current_speed,
4899                                              &current_duplex);
4900
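                     /* Poll BMCR until it reads back a sane value;
                      * 0x0000 and 0x7fff mean the register contents
                      * are not valid yet.
                      */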
4901                 bmcr = 0;
4902                 for (i = 0; i < 200; i++) {
4903                         tg3_readphy(tp, MII_BMCR, &bmcr);
4904                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4905                                 continue;
4906                         if (bmcr && bmcr != 0x7fff)
4907                                 break;
4908                         udelay(10);
4909                 }
4910
4911                 lcl_adv = 0;
4912                 rmt_adv = 0;
4913
4914                 tp->link_config.active_speed = current_speed;
4915                 tp->link_config.active_duplex = current_duplex;
4916
4917                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4918                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4919
4920                         if ((bmcr & BMCR_ANENABLE) &&
4921                             eee_config_ok &&
4922                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4923                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4924                                 current_link_up = true;
4925
4926                         /* Changes to the EEE settings take effect only after
4927                          * a PHY reset.  If we have skipped a reset due to
4928                          * Link Flap Avoidance being enabled, do it now.
4929                          */
4930                         if (!eee_config_ok &&
4931                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4932                             !force_reset) {
4933                                 tg3_setup_eee(tp);
4934                                 tg3_phy_reset(tp);
4935                         }
4936                 } else {
4937                         if (!(bmcr & BMCR_ANENABLE) &&
4938                             tp->link_config.speed == current_speed &&
4939                             tp->link_config.duplex == current_duplex) {
4940                                 current_link_up = true;
4941                         }
4942                 }
4943
4944                 if (current_link_up &&
4945                     tp->link_config.active_duplex == DUPLEX_FULL) {
4946                         u32 reg, bit;
4947
4948                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4949                                 reg = MII_TG3_FET_GEN_STAT;
4950                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4951                         } else {
4952                                 reg = MII_TG3_EXT_STAT;
4953                                 bit = MII_TG3_EXT_STAT_MDIX;
4954                         }
4955
4956                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4957                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4958
4959                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4960                 }
4961         }
4962
4963 relink:
4964         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4965                 tg3_phy_copper_begin(tp);
4966
4967                 if (tg3_flag(tp, ROBOSWITCH)) {
4968                         current_link_up = true;
4969                         /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4970                         current_speed = SPEED_1000;
4971                         current_duplex = DUPLEX_FULL;
4972                         tp->link_config.active_speed = current_speed;
4973                         tp->link_config.active_duplex = current_duplex;
4974                 }
4975
4976                 tg3_readphy(tp, MII_BMSR, &bmsr);
4977                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4978                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4979                         current_link_up = true;
4980         }
4981
4982         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4983         if (current_link_up) {
4984                 if (tp->link_config.active_speed == SPEED_100 ||
4985                     tp->link_config.active_speed == SPEED_10)
4986                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4987                 else
4988                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4989         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4990                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4991         else
4992                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4993
4994         /* In order for the 5750 core in the BCM4785 chip to work properly
4995          * in RGMII mode, the LED Control Register must be set up.
4996          */
4997         if (tg3_flag(tp, RGMII_MODE)) {
4998                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4999                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5000
5001                 if (tp->link_config.active_speed == SPEED_10)
5002                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5003                 else if (tp->link_config.active_speed == SPEED_100)
5004                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5005                                      LED_CTRL_100MBPS_ON);
5006                 else if (tp->link_config.active_speed == SPEED_1000)
5007                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5008                                      LED_CTRL_1000MBPS_ON);
5009
5010                 tw32(MAC_LED_CTRL, led_ctrl);
5011                 udelay(40);
5012         }
5013
5014         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5015         if (tp->link_config.active_duplex == DUPLEX_HALF)
5016                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5017
5018         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5019                 if (current_link_up &&
5020                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5021                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5022                 else
5023                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5024         }
5025
5026         /* ??? Without this setting Netgear GA302T PHY does not
5027          * ??? send/receive packets...
5028          */
5029         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5030             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5031                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5032                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5033                 udelay(80);
5034         }
5035
5036         tw32_f(MAC_MODE, tp->mac_mode);
5037         udelay(40);
5038
5039         tg3_phy_eee_adjust(tp, current_link_up);
5040
5041         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5042                 /* Polled via timer. */
5043                 tw32_f(MAC_EVENT, 0);
5044         } else {
5045                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5046         }
5047         udelay(40);
5048
5049         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5050             current_link_up &&
5051             tp->link_config.active_speed == SPEED_1000 &&
5052             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5053                 udelay(120);
5054                 tw32_f(MAC_STATUS,
5055                      (MAC_STATUS_SYNC_CHANGED |
5056                       MAC_STATUS_CFG_CHANGED));
5057                 udelay(40);
5058                 tg3_write_mem(tp,
5059                               NIC_SRAM_FIRMWARE_MBOX,
5060                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5061         }
5062
5063         /* Prevent send BD corruption. */
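             /* On chips with the CLKREQ bug, CLKREQ must be kept off
              * at 10/100 speeds and may be re-enabled at other speeds.
              */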
5064         if (tg3_flag(tp, CLKREQ_BUG)) {
5065                 if (tp->link_config.active_speed == SPEED_100 ||
5066                     tp->link_config.active_speed == SPEED_10)
5067                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5068                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5069                 else
5070                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5071                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5072         }
5073
5074         tg3_test_and_report_link_chg(tp, current_link_up);
5075
5076         return 0;
5077 }
5078
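     /* Software state machine for 1000BASE-X autonegotiation, modeled
      * on the IEEE 802.3 clause 37 arbitration process.  It is used
      * when the MAC cannot (or is not configured to) run
      * autonegotiation in hardware.
      */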
5079 struct tg3_fiber_aneginfo {
5080         int state;
5081 #define ANEG_STATE_UNKNOWN              0
5082 #define ANEG_STATE_AN_ENABLE            1
5083 #define ANEG_STATE_RESTART_INIT         2
5084 #define ANEG_STATE_RESTART              3
5085 #define ANEG_STATE_DISABLE_LINK_OK      4
5086 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5087 #define ANEG_STATE_ABILITY_DETECT       6
5088 #define ANEG_STATE_ACK_DETECT_INIT      7
5089 #define ANEG_STATE_ACK_DETECT           8
5090 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5091 #define ANEG_STATE_COMPLETE_ACK         10
5092 #define ANEG_STATE_IDLE_DETECT_INIT     11
5093 #define ANEG_STATE_IDLE_DETECT          12
5094 #define ANEG_STATE_LINK_OK              13
5095 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5096 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5097
5098         u32 flags;
5099 #define MR_AN_ENABLE            0x00000001
5100 #define MR_RESTART_AN           0x00000002
5101 #define MR_AN_COMPLETE          0x00000004
5102 #define MR_PAGE_RX              0x00000008
5103 #define MR_NP_LOADED            0x00000010
5104 #define MR_TOGGLE_TX            0x00000020
5105 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5106 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5107 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5108 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5109 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5110 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5111 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5112 #define MR_TOGGLE_RX            0x00002000
5113 #define MR_NP_RX                0x00004000
5114
5115 #define MR_LINK_OK              0x80000000
5116
5117         unsigned long link_time, cur_time;
5118
5119         u32 ability_match_cfg;
5120         int ability_match_count;
5121
5122         char ability_match, idle_match, ack_match;
5123
5124         u32 txconfig, rxconfig;
5125 #define ANEG_CFG_NP             0x00000080
5126 #define ANEG_CFG_ACK            0x00000040
5127 #define ANEG_CFG_RF2            0x00000020
5128 #define ANEG_CFG_RF1            0x00000010
5129 #define ANEG_CFG_PS2            0x00000001
5130 #define ANEG_CFG_PS1            0x00008000
5131 #define ANEG_CFG_HD             0x00004000
5132 #define ANEG_CFG_FD             0x00002000
5133 #define ANEG_CFG_INVAL          0x00001f06
5134
5135 };
5136 #define ANEG_OK         0
5137 #define ANEG_DONE       1
5138 #define ANEG_TIMER_ENAB 2
5139 #define ANEG_FAILED     -1
5140
5141 #define ANEG_STATE_SETTLE_TIME  10000
5142
5143 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5144                                    struct tg3_fiber_aneginfo *ap)
5145 {
5146         u16 flowctrl;
5147         unsigned long delta;
5148         u32 rx_cfg_reg;
5149         int ret;
5150
5151         if (ap->state == ANEG_STATE_UNKNOWN) {
5152                 ap->rxconfig = 0;
5153                 ap->link_time = 0;
5154                 ap->cur_time = 0;
5155                 ap->ability_match_cfg = 0;
5156                 ap->ability_match_count = 0;
5157                 ap->ability_match = 0;
5158                 ap->idle_match = 0;
5159                 ap->ack_match = 0;
5160         }
5161         ap->cur_time++;
5162
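             /* Sample the received config word.  Seeing the same word
              * on consecutive passes sets ability_match; an idle
              * (config-free) stream clears all of the match state.
              */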
5163         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5164                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5165
5166                 if (rx_cfg_reg != ap->ability_match_cfg) {
5167                         ap->ability_match_cfg = rx_cfg_reg;
5168                         ap->ability_match = 0;
5169                         ap->ability_match_count = 0;
5170                 } else {
5171                         if (++ap->ability_match_count > 1) {
5172                                 ap->ability_match = 1;
5173                                 ap->ability_match_cfg = rx_cfg_reg;
5174                         }
5175                 }
5176                 if (rx_cfg_reg & ANEG_CFG_ACK)
5177                         ap->ack_match = 1;
5178                 else
5179                         ap->ack_match = 0;
5180
5181                 ap->idle_match = 0;
5182         } else {
5183                 ap->idle_match = 1;
5184                 ap->ability_match_cfg = 0;
5185                 ap->ability_match_count = 0;
5186                 ap->ability_match = 0;
5187                 ap->ack_match = 0;
5188
5189                 rx_cfg_reg = 0;
5190         }
5191
5192         ap->rxconfig = rx_cfg_reg;
5193         ret = ANEG_OK;
5194
5195         switch (ap->state) {
5196         case ANEG_STATE_UNKNOWN:
5197                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5198                         ap->state = ANEG_STATE_AN_ENABLE;
5199
5200                 /* fallthru */
5201         case ANEG_STATE_AN_ENABLE:
5202                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5203                 if (ap->flags & MR_AN_ENABLE) {
5204                         ap->link_time = 0;
5205                         ap->cur_time = 0;
5206                         ap->ability_match_cfg = 0;
5207                         ap->ability_match_count = 0;
5208                         ap->ability_match = 0;
5209                         ap->idle_match = 0;
5210                         ap->ack_match = 0;
5211
5212                         ap->state = ANEG_STATE_RESTART_INIT;
5213                 } else {
5214                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5215                 }
5216                 break;
5217
5218         case ANEG_STATE_RESTART_INIT:
5219                 ap->link_time = ap->cur_time;
5220                 ap->flags &= ~(MR_NP_LOADED);
5221                 ap->txconfig = 0;
5222                 tw32(MAC_TX_AUTO_NEG, 0);
5223                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5224                 tw32_f(MAC_MODE, tp->mac_mode);
5225                 udelay(40);
5226
5227                 ret = ANEG_TIMER_ENAB;
5228                 ap->state = ANEG_STATE_RESTART;
5229
5230                 /* fallthru */
5231         case ANEG_STATE_RESTART:
5232                 delta = ap->cur_time - ap->link_time;
5233                 if (delta > ANEG_STATE_SETTLE_TIME)
5234                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5235                 else
5236                         ret = ANEG_TIMER_ENAB;
5237                 break;
5238
5239         case ANEG_STATE_DISABLE_LINK_OK:
5240                 ret = ANEG_DONE;
5241                 break;
5242
5243         case ANEG_STATE_ABILITY_DETECT_INIT:
5244                 ap->flags &= ~(MR_TOGGLE_TX);
5245                 ap->txconfig = ANEG_CFG_FD;
5246                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5247                 if (flowctrl & ADVERTISE_1000XPAUSE)
5248                         ap->txconfig |= ANEG_CFG_PS1;
5249                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5250                         ap->txconfig |= ANEG_CFG_PS2;
5251                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5252                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5253                 tw32_f(MAC_MODE, tp->mac_mode);
5254                 udelay(40);
5255
5256                 ap->state = ANEG_STATE_ABILITY_DETECT;
5257                 break;
5258
5259         case ANEG_STATE_ABILITY_DETECT:
5260                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5261                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5262                 break;
5263
5264         case ANEG_STATE_ACK_DETECT_INIT:
5265                 ap->txconfig |= ANEG_CFG_ACK;
5266                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5267                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5268                 tw32_f(MAC_MODE, tp->mac_mode);
5269                 udelay(40);
5270
5271                 ap->state = ANEG_STATE_ACK_DETECT;
5272
5273                 /* fallthru */
5274         case ANEG_STATE_ACK_DETECT:
5275                 if (ap->ack_match != 0) {
5276                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5277                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5278                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5279                         } else {
5280                                 ap->state = ANEG_STATE_AN_ENABLE;
5281                         }
5282                 } else if (ap->ability_match != 0 &&
5283                            ap->rxconfig == 0) {
5284                         ap->state = ANEG_STATE_AN_ENABLE;
5285                 }
5286                 break;
5287
5288         case ANEG_STATE_COMPLETE_ACK_INIT:
5289                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5290                         ret = ANEG_FAILED;
5291                         break;
5292                 }
5293                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5294                                MR_LP_ADV_HALF_DUPLEX |
5295                                MR_LP_ADV_SYM_PAUSE |
5296                                MR_LP_ADV_ASYM_PAUSE |
5297                                MR_LP_ADV_REMOTE_FAULT1 |
5298                                MR_LP_ADV_REMOTE_FAULT2 |
5299                                MR_LP_ADV_NEXT_PAGE |
5300                                MR_TOGGLE_RX |
5301                                MR_NP_RX);
5302                 if (ap->rxconfig & ANEG_CFG_FD)
5303                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5304                 if (ap->rxconfig & ANEG_CFG_HD)
5305                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5306                 if (ap->rxconfig & ANEG_CFG_PS1)
5307                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5308                 if (ap->rxconfig & ANEG_CFG_PS2)
5309                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5310                 if (ap->rxconfig & ANEG_CFG_RF1)
5311                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5312                 if (ap->rxconfig & ANEG_CFG_RF2)
5313                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5314                 if (ap->rxconfig & ANEG_CFG_NP)
5315                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5316
5317                 ap->link_time = ap->cur_time;
5318
5319                 ap->flags ^= (MR_TOGGLE_TX);
5320                 if (ap->rxconfig & 0x0008)
5321                         ap->flags |= MR_TOGGLE_RX;
5322                 if (ap->rxconfig & ANEG_CFG_NP)
5323                         ap->flags |= MR_NP_RX;
5324                 ap->flags |= MR_PAGE_RX;
5325
5326                 ap->state = ANEG_STATE_COMPLETE_ACK;
5327                 ret = ANEG_TIMER_ENAB;
5328                 break;
5329
5330         case ANEG_STATE_COMPLETE_ACK:
5331                 if (ap->ability_match != 0 &&
5332                     ap->rxconfig == 0) {
5333                         ap->state = ANEG_STATE_AN_ENABLE;
5334                         break;
5335                 }
5336                 delta = ap->cur_time - ap->link_time;
5337                 if (delta > ANEG_STATE_SETTLE_TIME) {
5338                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5339                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5340                         } else {
5341                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5342                                     !(ap->flags & MR_NP_RX)) {
5343                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5344                                 } else {
5345                                         ret = ANEG_FAILED;
5346                                 }
5347                         }
5348                 }
5349                 break;
5350
5351         case ANEG_STATE_IDLE_DETECT_INIT:
5352                 ap->link_time = ap->cur_time;
5353                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5354                 tw32_f(MAC_MODE, tp->mac_mode);
5355                 udelay(40);
5356
5357                 ap->state = ANEG_STATE_IDLE_DETECT;
5358                 ret = ANEG_TIMER_ENAB;
5359                 break;
5360
5361         case ANEG_STATE_IDLE_DETECT:
5362                 if (ap->ability_match != 0 &&
5363                     ap->rxconfig == 0) {
5364                         ap->state = ANEG_STATE_AN_ENABLE;
5365                         break;
5366                 }
5367                 delta = ap->cur_time - ap->link_time;
5368                 if (delta > ANEG_STATE_SETTLE_TIME) {
5369                         /* XXX another gem from the Broadcom driver :( */
5370                         ap->state = ANEG_STATE_LINK_OK;
5371                 }
5372                 break;
5373
5374         case ANEG_STATE_LINK_OK:
5375                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5376                 ret = ANEG_DONE;
5377                 break;
5378
5379         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5380                 /* ??? unimplemented */
5381                 break;
5382
5383         case ANEG_STATE_NEXT_PAGE_WAIT:
5384                 /* ??? unimplemented */
5385                 break;
5386
5387         default:
5388                 ret = ANEG_FAILED;
5389                 break;
5390         }
5391
5392         return ret;
5393 }
5394
5395 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5396 {
5397         int res = 0;
5398         struct tg3_fiber_aneginfo aninfo;
5399         int status = ANEG_FAILED;
5400         unsigned int tick;
5401         u32 tmp;
5402
5403         tw32_f(MAC_TX_AUTO_NEG, 0);
5404
5405         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5406         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5407         udelay(40);
5408
5409         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5410         udelay(40);
5411
5412         memset(&aninfo, 0, sizeof(aninfo));
5413         aninfo.flags |= MR_AN_ENABLE;
5414         aninfo.state = ANEG_STATE_UNKNOWN;
5415         aninfo.cur_time = 0;
5416         tick = 0;
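             /* Run the state machine for at most ~195 ms, one pass
              * per microsecond.
              */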
5417         while (++tick < 195000) {
5418                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5419                 if (status == ANEG_DONE || status == ANEG_FAILED)
5420                         break;
5421
5422                 udelay(1);
5423         }
5424
5425         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5426         tw32_f(MAC_MODE, tp->mac_mode);
5427         udelay(40);
5428
5429         *txflags = aninfo.txconfig;
5430         *rxflags = aninfo.flags;
5431
5432         if (status == ANEG_DONE &&
5433             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5434                              MR_LP_ADV_FULL_DUPLEX)))
5435                 res = 1;
5436
5437         return res;
5438 }
5439
5440 static void tg3_init_bcm8002(struct tg3 *tp)
5441 {
5442         u32 mac_status = tr32(MAC_STATUS);
5443         int i;
5444
5445         /* Reset when initializing for the first time or when we have a link. */
5446         if (tg3_flag(tp, INIT_COMPLETE) &&
5447             !(mac_status & MAC_STATUS_PCS_SYNCED))
5448                 return;
5449
5450         /* Set PLL lock range. */
5451         tg3_writephy(tp, 0x16, 0x8007);
5452
5453         /* SW reset */
5454         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5455
5456         /* Wait for reset to complete. */
5457         /* XXX schedule_timeout() ... */
5458         for (i = 0; i < 500; i++)
5459                 udelay(10);
5460
5461         /* Config mode; select PMA/Ch 1 regs. */
5462         tg3_writephy(tp, 0x10, 0x8411);
5463
5464         /* Enable auto-lock and comdet, select txclk for tx. */
5465         tg3_writephy(tp, 0x11, 0x0a10);
5466
5467         tg3_writephy(tp, 0x18, 0x00a0);
5468         tg3_writephy(tp, 0x16, 0x41ff);
5469
5470         /* Assert and deassert POR. */
5471         tg3_writephy(tp, 0x13, 0x0400);
5472         udelay(40);
5473         tg3_writephy(tp, 0x13, 0x0000);
5474
5475         tg3_writephy(tp, 0x11, 0x0a50);
5476         udelay(40);
5477         tg3_writephy(tp, 0x11, 0x0a10);
5478
5479         /* Wait for signal to stabilize */
5480         /* XXX schedule_timeout() ... */
5481         for (i = 0; i < 15000; i++)
5482                 udelay(10);
5483
5484         /* Deselect the channel register so we can read the PHYID
5485          * later.
5486          */
5487         tg3_writephy(tp, 0x10, 0x8011);
5488 }
5489
5490 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5491 {
5492         u16 flowctrl;
5493         bool current_link_up;
5494         u32 sg_dig_ctrl, sg_dig_status;
5495         u32 serdes_cfg, expected_sg_dig_ctrl;
5496         int workaround, port_a;
5497
5498         serdes_cfg = 0;
5499         expected_sg_dig_ctrl = 0;
5500         workaround = 0;
5501         port_a = 1;
5502         current_link_up = false;
5503
5504         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5505             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5506                 workaround = 1;
5507                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5508                         port_a = 0;
5509
5510                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5511                 /* preserve bits 20-23 for voltage regulator */
5512                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5513         }
5514
5515         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5516
5517         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5518                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5519                         if (workaround) {
5520                                 u32 val = serdes_cfg;
5521
5522                                 if (port_a)
5523                                         val |= 0xc010000;
5524                                 else
5525                                         val |= 0x4010000;
5526                                 tw32_f(MAC_SERDES_CFG, val);
5527                         }
5528
5529                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5530                 }
5531                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5532                         tg3_setup_flow_control(tp, 0, 0);
5533                         current_link_up = true;
5534                 }
5535                 goto out;
5536         }
5537
5538         /* Want auto-negotiation.  */
5539         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5540
5541         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5542         if (flowctrl & ADVERTISE_1000XPAUSE)
5543                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5544         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5545                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5546
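             /* If SG_DIG_CTRL does not match the wanted setup,
              * (re)start hardware autoneg by pulsing SG_DIG_SOFT_RESET.
              */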
5547         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5548                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5549                     tp->serdes_counter &&
5550                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5551                                     MAC_STATUS_RCVD_CFG)) ==
5552                      MAC_STATUS_PCS_SYNCED)) {
5553                         tp->serdes_counter--;
5554                         current_link_up = true;
5555                         goto out;
5556                 }
5557 restart_autoneg:
5558                 if (workaround)
5559                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5560                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5561                 udelay(5);
5562                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5563
5564                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5565                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5566         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5567                                  MAC_STATUS_SIGNAL_DET)) {
5568                 sg_dig_status = tr32(SG_DIG_STATUS);
5569                 mac_status = tr32(MAC_STATUS);
5570
5571                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5572                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5573                         u32 local_adv = 0, remote_adv = 0;
5574
5575                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5576                                 local_adv |= ADVERTISE_1000XPAUSE;
5577                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5578                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5579
5580                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5581                                 remote_adv |= LPA_1000XPAUSE;
5582                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5583                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5584
5585                         tp->link_config.rmt_adv =
5586                                            mii_adv_to_ethtool_adv_x(remote_adv);
5587
5588                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5589                         current_link_up = true;
5590                         tp->serdes_counter = 0;
5591                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5592                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5593                         if (tp->serdes_counter)
5594                                 tp->serdes_counter--;
5595                         else {
5596                                 if (workaround) {
5597                                         u32 val = serdes_cfg;
5598
5599                                         if (port_a)
5600                                                 val |= 0xc010000;
5601                                         else
5602                                                 val |= 0x4010000;
5603
5604                                         tw32_f(MAC_SERDES_CFG, val);
5605                                 }
5606
5607                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5608                                 udelay(40);
5609
5610                                 /* Parallel detection: the link is up only
5611                                  * if we have PCS_SYNC and are not
5612                                  * receiving config code words. */
5613                                 mac_status = tr32(MAC_STATUS);
5614                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5615                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5616                                         tg3_setup_flow_control(tp, 0, 0);
5617                                         current_link_up = true;
5618                                         tp->phy_flags |=
5619                                                 TG3_PHYFLG_PARALLEL_DETECT;
5620                                         tp->serdes_counter =
5621                                                 SERDES_PARALLEL_DET_TIMEOUT;
5622                                 } else
5623                                         goto restart_autoneg;
5624                         }
5625                 }
5626         } else {
5627                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5628                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5629         }
5630
5631 out:
5632         return current_link_up;
5633 }
5634
5635 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5636 {
5637         bool current_link_up = false;
5638
5639         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5640                 goto out;
5641
5642         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5643                 u32 txflags, rxflags;
5644                 int i;
5645
5646                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5647                         u32 local_adv = 0, remote_adv = 0;
5648
5649                         if (txflags & ANEG_CFG_PS1)
5650                                 local_adv |= ADVERTISE_1000XPAUSE;
5651                         if (txflags & ANEG_CFG_PS2)
5652                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5653
5654                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5655                                 remote_adv |= LPA_1000XPAUSE;
5656                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5657                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5658
5659                         tp->link_config.rmt_adv =
5660                                            mii_adv_to_ethtool_adv_x(remote_adv);
5661
5662                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5663
5664                         current_link_up = true;
5665                 }
5666                 for (i = 0; i < 30; i++) {
5667                         udelay(20);
5668                         tw32_f(MAC_STATUS,
5669                                (MAC_STATUS_SYNC_CHANGED |
5670                                 MAC_STATUS_CFG_CHANGED));
5671                         udelay(40);
5672                         if ((tr32(MAC_STATUS) &
5673                              (MAC_STATUS_SYNC_CHANGED |
5674                               MAC_STATUS_CFG_CHANGED)) == 0)
5675                                 break;
5676                 }
5677
5678                 mac_status = tr32(MAC_STATUS);
5679                 if (!current_link_up &&
5680                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5681                     !(mac_status & MAC_STATUS_RCVD_CFG))
5682                         current_link_up = true;
5683         } else {
5684                 tg3_setup_flow_control(tp, 0, 0);
5685
5686                 /* Forcing 1000FD link up. */
5687                 current_link_up = true;
5688
5689                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5690                 udelay(40);
5691
5692                 tw32_f(MAC_MODE, tp->mac_mode);
5693                 udelay(40);
5694         }
5695
5696 out:
5697         return current_link_up;
5698 }
5699
5700 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5701 {
5702         u32 orig_pause_cfg;
5703         u16 orig_active_speed;
5704         u8 orig_active_duplex;
5705         u32 mac_status;
5706         bool current_link_up;
5707         int i;
5708
5709         orig_pause_cfg = tp->link_config.active_flowctrl;
5710         orig_active_speed = tp->link_config.active_speed;
5711         orig_active_duplex = tp->link_config.active_duplex;
5712
5713         if (!tg3_flag(tp, HW_AUTONEG) &&
5714             tp->link_up &&
5715             tg3_flag(tp, INIT_COMPLETE)) {
5716                 mac_status = tr32(MAC_STATUS);
5717                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5718                                MAC_STATUS_SIGNAL_DET |
5719                                MAC_STATUS_CFG_CHANGED |
5720                                MAC_STATUS_RCVD_CFG);
5721                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5722                                    MAC_STATUS_SIGNAL_DET)) {
5723                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5724                                             MAC_STATUS_CFG_CHANGED));
5725                         return 0;
5726                 }
5727         }
5728
5729         tw32_f(MAC_TX_AUTO_NEG, 0);
5730
5731         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5732         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5733         tw32_f(MAC_MODE, tp->mac_mode);
5734         udelay(40);
5735
5736         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5737                 tg3_init_bcm8002(tp);
5738
5739         /* Enable link change events even when serdes polling is used. */
5740         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5741         udelay(40);
5742
5743         current_link_up = false;
5744         tp->link_config.rmt_adv = 0;
5745         mac_status = tr32(MAC_STATUS);
5746
5747         if (tg3_flag(tp, HW_AUTONEG))
5748                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5749         else
5750                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5751
5752         tp->napi[0].hw_status->status =
5753                 (SD_STATUS_UPDATED |
5754                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5755
5756         for (i = 0; i < 100; i++) {
5757                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5758                                     MAC_STATUS_CFG_CHANGED));
5759                 udelay(5);
5760                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5761                                          MAC_STATUS_CFG_CHANGED |
5762                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5763                         break;
5764         }
5765
5766         mac_status = tr32(MAC_STATUS);
5767         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5768                 current_link_up = false;
5769                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5770                     tp->serdes_counter == 0) {
5771                         tw32_f(MAC_MODE, (tp->mac_mode |
5772                                           MAC_MODE_SEND_CONFIGS));
5773                         udelay(1);
5774                         tw32_f(MAC_MODE, tp->mac_mode);
5775                 }
5776         }
5777
5778         if (current_link_up) {
5779                 tp->link_config.active_speed = SPEED_1000;
5780                 tp->link_config.active_duplex = DUPLEX_FULL;
5781                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5782                                     LED_CTRL_LNKLED_OVERRIDE |
5783                                     LED_CTRL_1000MBPS_ON));
5784         } else {
5785                 tp->link_config.active_speed = SPEED_UNKNOWN;
5786                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5787                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5788                                     LED_CTRL_LNKLED_OVERRIDE |
5789                                     LED_CTRL_TRAFFIC_OVERRIDE));
5790         }
5791
5792         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5793                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5794                 if (orig_pause_cfg != now_pause_cfg ||
5795                     orig_active_speed != tp->link_config.active_speed ||
5796                     orig_active_duplex != tp->link_config.active_duplex)
5797                         tg3_link_report(tp);
5798         }
5799
5800         return 0;
5801 }
5802
5803 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5804 {
5805         int err = 0;
5806         u32 bmsr, bmcr;
5807         u16 current_speed = SPEED_UNKNOWN;
5808         u8 current_duplex = DUPLEX_UNKNOWN;
5809         bool current_link_up = false;
5810         u32 local_adv, remote_adv, sgsr;
5811
5812         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5813              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5814              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5815              (sgsr & SERDES_TG3_SGMII_MODE)) {
5816
5817                 if (force_reset)
5818                         tg3_phy_reset(tp);
5819
5820                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5821
5822                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5823                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5824                 } else {
5825                         current_link_up = true;
5826                         if (sgsr & SERDES_TG3_SPEED_1000) {
5827                                 current_speed = SPEED_1000;
5828                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5829                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5830                                 current_speed = SPEED_100;
5831                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5832                         } else {
5833                                 current_speed = SPEED_10;
5834                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5835                         }
5836
5837                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5838                                 current_duplex = DUPLEX_FULL;
5839                         else
5840                                 current_duplex = DUPLEX_HALF;
5841                 }
5842
5843                 tw32_f(MAC_MODE, tp->mac_mode);
5844                 udelay(40);
5845
5846                 tg3_clear_mac_status(tp);
5847
5848                 goto fiber_setup_done;
5849         }
5850
5851         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5852         tw32_f(MAC_MODE, tp->mac_mode);
5853         udelay(40);
5854
5855         tg3_clear_mac_status(tp);
5856
5857         if (force_reset)
5858                 tg3_phy_reset(tp);
5859
5860         tp->link_config.rmt_adv = 0;
5861
5862         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5863         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5864         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5865                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5866                         bmsr |= BMSR_LSTATUS;
5867                 else
5868                         bmsr &= ~BMSR_LSTATUS;
5869         }
5870
5871         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5872
5873         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5874             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5875                 /* do nothing, just check for link up at the end */
5876         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5877                 u32 adv, newadv;
5878
5879                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5880                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5881                                  ADVERTISE_1000XPAUSE |
5882                                  ADVERTISE_1000XPSE_ASYM |
5883                                  ADVERTISE_SLCT);
5884
5885                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5886                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5887
5888                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5889                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5890                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5891                         tg3_writephy(tp, MII_BMCR, bmcr);
5892
5893                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5894                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5895                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5896
5897                         return err;
5898                 }
5899         } else {
5900                 u32 new_bmcr;
5901
5902                 bmcr &= ~BMCR_SPEED1000;
5903                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5904
5905                 if (tp->link_config.duplex == DUPLEX_FULL)
5906                         new_bmcr |= BMCR_FULLDPLX;
5907
5908                 if (new_bmcr != bmcr) {
5909                         /* BMCR_SPEED1000 is a reserved bit that needs
5910                          * to be set on write.
5911                          */
5912                         new_bmcr |= BMCR_SPEED1000;
5913
5914                         /* Force a linkdown */
5915                         if (tp->link_up) {
5916                                 u32 adv;
5917
5918                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5919                                 adv &= ~(ADVERTISE_1000XFULL |
5920                                          ADVERTISE_1000XHALF |
5921                                          ADVERTISE_SLCT);
5922                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5923                                 tg3_writephy(tp, MII_BMCR, bmcr |
5924                                                            BMCR_ANRESTART |
5925                                                            BMCR_ANENABLE);
5926                                 udelay(10);
5927                                 tg3_carrier_off(tp);
5928                         }
5929                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5930                         bmcr = new_bmcr;
5931                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5932                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5933                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5934                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5935                                         bmsr |= BMSR_LSTATUS;
5936                                 else
5937                                         bmsr &= ~BMSR_LSTATUS;
5938                         }
5939                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5940                 }
5941         }
5942
5943         if (bmsr & BMSR_LSTATUS) {
5944                 current_speed = SPEED_1000;
5945                 current_link_up = true;
5946                 if (bmcr & BMCR_FULLDPLX)
5947                         current_duplex = DUPLEX_FULL;
5948                 else
5949                         current_duplex = DUPLEX_HALF;
5950
5951                 local_adv = 0;
5952                 remote_adv = 0;
5953
5954                 if (bmcr & BMCR_ANENABLE) {
5955                         u32 common;
5956
5957                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5958                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5959                         common = local_adv & remote_adv;
5960                         if (common & (ADVERTISE_1000XHALF |
5961                                       ADVERTISE_1000XFULL)) {
5962                                 if (common & ADVERTISE_1000XFULL)
5963                                         current_duplex = DUPLEX_FULL;
5964                                 else
5965                                         current_duplex = DUPLEX_HALF;
5966
5967                                 tp->link_config.rmt_adv =
5968                                            mii_adv_to_ethtool_adv_x(remote_adv);
5969                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5970                                 /* Link is up via parallel detect */
5971                         } else {
5972                                 current_link_up = false;
5973                         }
5974                 }
5975         }
5976
5977 fiber_setup_done:
5978         if (current_link_up && current_duplex == DUPLEX_FULL)
5979                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5980
5981         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5982         if (tp->link_config.active_duplex == DUPLEX_HALF)
5983                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5984
5985         tw32_f(MAC_MODE, tp->mac_mode);
5986         udelay(40);
5987
5988         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5989
5990         tp->link_config.active_speed = current_speed;
5991         tp->link_config.active_duplex = current_duplex;
5992
5993         tg3_test_and_report_link_chg(tp, current_link_up);
5994         return err;
5995 }
5996
5997 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5998 {
5999         if (tp->serdes_counter) {
6000                 /* Give autoneg time to complete. */
6001                 tp->serdes_counter--;
6002                 return;
6003         }
6004
6005         if (!tp->link_up &&
6006             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6007                 u32 bmcr;
6008
6009                 tg3_readphy(tp, MII_BMCR, &bmcr);
6010                 if (bmcr & BMCR_ANENABLE) {
6011                         u32 phy1, phy2;
6012
6013                         /* Select shadow register 0x1f */
6014                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6015                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6016
6017                         /* Select expansion interrupt status register */
6018                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6019                                          MII_TG3_DSP_EXP1_INT_STAT);
6020                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6021                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6022
6023                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6024                                 /* We have signal detect but are not
6025                                  * receiving config code words; the
6026                                  * link is up via parallel detection.
6027                                  */
6028
6029                                 bmcr &= ~BMCR_ANENABLE;
6030                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6031                                 tg3_writephy(tp, MII_BMCR, bmcr);
6032                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6033                         }
6034                 }
6035         } else if (tp->link_up &&
6036                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6037                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6038                 u32 phy2;
6039
6040                 /* Select expansion interrupt status register */
6041                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6042                                  MII_TG3_DSP_EXP1_INT_STAT);
6043                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6044                 if (phy2 & 0x20) {
6045                         u32 bmcr;
6046
6047                         /* Config code words received, turn on autoneg. */
6048                         tg3_readphy(tp, MII_BMCR, &bmcr);
6049                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6050
6051                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6052
6053                 }
6054         }
6055 }
6056
6057 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6058 {
6059         u32 val;
6060         int err;
6061
6062         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6063                 err = tg3_setup_fiber_phy(tp, force_reset);
6064         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6065                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6066         else
6067                 err = tg3_setup_copper_phy(tp, force_reset);
6068
6069         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6070                 u32 scale;
6071
6072                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6073                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6074                         scale = 65;
6075                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6076                         scale = 6;
6077                 else
6078                         scale = 12;
6079
6080                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6081                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6082                 tw32(GRC_MISC_CFG, val);
6083         }
6084
6085         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6086               (6 << TX_LENGTHS_IPG_SHIFT);
6087         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6088             tg3_asic_rev(tp) == ASIC_REV_5762)
6089                 val |= tr32(MAC_TX_LENGTHS) &
6090                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6091                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6092
6093         if (tp->link_config.active_speed == SPEED_1000 &&
6094             tp->link_config.active_duplex == DUPLEX_HALF)
6095                 tw32(MAC_TX_LENGTHS, val |
6096                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6097         else
6098                 tw32(MAC_TX_LENGTHS, val |
6099                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6100
6101         if (!tg3_flag(tp, 5705_PLUS)) {
6102                 if (tp->link_up) {
6103                         tw32(HOSTCC_STAT_COAL_TICKS,
6104                              tp->coal.stats_block_coalesce_usecs);
6105                 } else {
6106                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6107                 }
6108         }
6109
6110         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6111                 val = tr32(PCIE_PWR_MGMT_THRESH);
6112                 if (!tp->link_up)
6113                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6114                               tp->pwrmgmt_thresh;
6115                 else
6116                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6117                 tw32(PCIE_PWR_MGMT_THRESH, val);
6118         }
6119
6120         return err;
6121 }
6122
6123 /* tp->lock must be held */
6124 static u64 tg3_refclk_read(struct tg3 *tp)
6125 {
6126         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6127         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6128 }
6129
6130 /* tp->lock must be held */
6131 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6132 {
6133         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6134
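        /* Stopping the reference clock keeps the counter from ticking
         * between the LSB and MSB writes below, so readers never observe
         * a torn 64-bit value; RESUME restarts it once both halves are
         * in place.
         */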
6135         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6136         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6137         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6138         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6139 }
6140
6141 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6142 static inline void tg3_full_unlock(struct tg3 *tp);
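/* Reports timestamping capabilities to user space; these are the fields
 * surfaced by `ethtool -T <dev>` (the ETHTOOL_GET_TS_INFO request).
 */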
6143 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6144 {
6145         struct tg3 *tp = netdev_priv(dev);
6146
6147         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6148                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6149                                 SOF_TIMESTAMPING_SOFTWARE;
6150
6151         if (tg3_flag(tp, PTP_CAPABLE)) {
6152                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6153                                         SOF_TIMESTAMPING_RX_HARDWARE |
6154                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6155         }
6156
6157         if (tp->ptp_clock)
6158                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6159         else
6160                 info->phc_index = -1;
6161
6162         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6163
6164         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6165                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6166                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6167                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6168         return 0;
6169 }
6170
6171 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6172 {
6173         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6174         bool neg_adj = false;
6175         u32 correction = 0;
6176
6177         if (ppb < 0) {
6178                 neg_adj = true;
6179                 ppb = -ppb;
6180         }
6181
6182         /* Frequency adjustment is performed using hardware with a 24-bit
6183          * accumulator and a programmable correction value. On each clock, the
6184          * correction value gets added to the accumulator and when it
6185          * overflows, the time counter is incremented/decremented.
6186          *
6187          * So conversion from ppb to correction value is
6188          *              ppb * (1 << 24) / 1000000000
6189          */
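        /* A worked example (illustrative values): requesting ppb = 5000
         * (i.e. a 5 ppm speed-up) gives
         *              correction = 5000 * (1 << 24) / 1000000000 = 83
         * so the 24-bit accumulator wraps on roughly 83 of every 2^24
         * clocks, skewing the counter by ~5000 ppb as requested.
         */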
6190         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6191                      TG3_EAV_REF_CLK_CORRECT_MASK;
6192
6193         tg3_full_lock(tp, 0);
6194
6195         if (correction)
6196                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6197                      TG3_EAV_REF_CLK_CORRECT_EN |
6198                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6199         else
6200                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6201
6202         tg3_full_unlock(tp);
6203
6204         return 0;
6205 }
6206
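/* Phase adjustments are accumulated in software (tp->ptp_adjust) rather
 * than written back to the hardware counter; tg3_ptp_gettime() and the
 * RX/TX timestamp conversion add the offset back in when reading.
 */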
6207 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6208 {
6209         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6210
6211         tg3_full_lock(tp, 0);
6212         tp->ptp_adjust += delta;
6213         tg3_full_unlock(tp);
6214
6215         return 0;
6216 }
6217
6218 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6219 {
6220         u64 ns;
6221         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6222
6223         tg3_full_lock(tp, 0);
6224         ns = tg3_refclk_read(tp);
6225         ns += tp->ptp_adjust;
6226         tg3_full_unlock(tp);
6227
6228         *ts = ns_to_timespec64(ns);
6229
6230         return 0;
6231 }
6232
6233 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6234                            const struct timespec64 *ts)
6235 {
6236         u64 ns;
6237         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6238
6239         ns = timespec64_to_ns(ts);
6240
6241         tg3_full_lock(tp, 0);
6242         tg3_refclk_write(tp, ns);
6243         tp->ptp_adjust = 0;
6244         tg3_full_unlock(tp);
6245
6246         return 0;
6247 }
6248
6249 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6250                           struct ptp_clock_request *rq, int on)
6251 {
6252         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6253         u32 clock_ctl;
6254         int rval = 0;
6255
6256         switch (rq->type) {
6257         case PTP_CLK_REQ_PEROUT:
6258                 if (rq->perout.index != 0)
6259                         return -EINVAL;
6260
6261                 tg3_full_lock(tp, 0);
6262                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6263                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6264
6265                 if (on) {
6266                         u64 nsec;
6267
6268                         nsec = rq->perout.start.sec * 1000000000ULL +
6269                                rq->perout.start.nsec;
6270
6271                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6272                                 netdev_warn(tp->dev,
6273                                             "Device supports only a one-shot timesync output, period must be 0\n");
6274                                 rval = -EINVAL;
6275                                 goto err_out;
6276                         }
6277
6278                         if (nsec & (1ULL << 63)) {
6279                                 netdev_warn(tp->dev,
6280                                             "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6281                                 rval = -EINVAL;
6282                                 goto err_out;
6283                         }
6284
6285                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6286                         tw32(TG3_EAV_WATCHDOG0_MSB,
6287                              TG3_EAV_WATCHDOG0_EN |
6288                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6289
6290                         tw32(TG3_EAV_REF_CLCK_CTL,
6291                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6292                 } else {
6293                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6294                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6295                 }
6296
6297 err_out:
6298                 tg3_full_unlock(tp);
6299                 return rval;
6300
6301         default:
6302                 break;
6303         }
6304
6305         return -EOPNOTSUPP;
6306 }
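
/* A minimal user-space sketch of arming the one-shot output through the
 * PTP character device (device path and start time are illustrative,
 * not taken from this driver; needs <fcntl.h>, <sys/ioctl.h> and
 * <linux/ptp_clock.h>):
 *
 *	struct ptp_perout_request req = {
 *		.index  = 0,
 *		.start  = { .sec = 1700000000, .nsec = 0 },
 *		.period = { .sec = 0, .nsec = 0 },  // must be zero: one-shot only
 *	};
 *	int fd = open("/dev/ptp0", O_RDWR);
 *	ioctl(fd, PTP_PEROUT_REQUEST, &req);
 */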
6307
6308 static const struct ptp_clock_info tg3_ptp_caps = {
6309         .owner          = THIS_MODULE,
6310         .name           = "tg3 clock",
6311         .max_adj        = 250000000,
6312         .n_alarm        = 0,
6313         .n_ext_ts       = 0,
6314         .n_per_out      = 1,
6315         .n_pins         = 0,
6316         .pps            = 0,
6317         .adjfreq        = tg3_ptp_adjfreq,
6318         .adjtime        = tg3_ptp_adjtime,
6319         .gettime64      = tg3_ptp_gettime,
6320         .settime64      = tg3_ptp_settime,
6321         .enable         = tg3_ptp_enable,
6322 };
6323
6324 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6325                                      struct skb_shared_hwtstamps *timestamp)
6326 {
6327         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6328         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6329                                            tp->ptp_adjust);
6330 }
6331
6332 /* tp->lock must be held */
6333 static void tg3_ptp_init(struct tg3 *tp)
6334 {
6335         if (!tg3_flag(tp, PTP_CAPABLE))
6336                 return;
6337
6338         /* Initialize the hardware clock to the system time. */
6339         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6340         tp->ptp_adjust = 0;
6341         tp->ptp_info = tg3_ptp_caps;
6342 }
6343
6344 /* tp->lock must be held */
6345 static void tg3_ptp_resume(struct tg3 *tp)
6346 {
6347         if (!tg3_flag(tp, PTP_CAPABLE))
6348                 return;
6349
6350         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6351         tp->ptp_adjust = 0;
6352 }
6353
6354 static void tg3_ptp_fini(struct tg3 *tp)
6355 {
6356         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6357                 return;
6358
6359         ptp_clock_unregister(tp->ptp_clock);
6360         tp->ptp_clock = NULL;
6361         tp->ptp_adjust = 0;
6362 }
6363
6364 static inline int tg3_irq_sync(struct tg3 *tp)
6365 {
6366         return tp->irq_sync;
6367 }
6368
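/* Note that dst is advanced by off before the copy loop, so the
 * destination buffer mirrors the register map: regs[off / 4] ends up
 * holding tr32(off).
 */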
6369 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6370 {
6371         int i;
6372
6373         dst = (u32 *)((u8 *)dst + off);
6374         for (i = 0; i < len; i += sizeof(u32))
6375                 *dst++ = tr32(off + i);
6376 }
6377
6378 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6379 {
6380         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6381         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6382         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6383         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6384         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6385         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6386         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6387         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6388         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6389         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6390         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6391         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6392         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6393         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6394         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6395         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6396         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6397         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6398         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6399
6400         if (tg3_flag(tp, SUPPORT_MSIX))
6401                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6402
6403         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6404         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6405         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6406         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6407         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6408         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6409         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6410         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6411
6412         if (!tg3_flag(tp, 5705_PLUS)) {
6413                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6414                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6415                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6416         }
6417
6418         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6419         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6420         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6421         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6422         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6423
6424         if (tg3_flag(tp, NVRAM))
6425                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6426 }
6427
6428 static void tg3_dump_state(struct tg3 *tp)
6429 {
6430         int i;
6431         u32 *regs;
6432
6433         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6434         if (!regs)
6435                 return;
6436
6437         if (tg3_flag(tp, PCI_EXPRESS)) {
6438                 /* Read up to but not including private PCI registers */
6439                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6440                         regs[i / sizeof(u32)] = tr32(i);
6441         } else
6442                 tg3_dump_legacy_regs(tp, regs);
6443
6444         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6445                 if (!regs[i + 0] && !regs[i + 1] &&
6446                     !regs[i + 2] && !regs[i + 3])
6447                         continue;
6448
6449                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6450                            i * 4,
6451                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6452         }
6453
6454         kfree(regs);
6455
6456         for (i = 0; i < tp->irq_cnt; i++) {
6457                 struct tg3_napi *tnapi = &tp->napi[i];
6458
6459                 /* SW status block */
6460                 netdev_err(tp->dev,
6461                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6462                            i,
6463                            tnapi->hw_status->status,
6464                            tnapi->hw_status->status_tag,
6465                            tnapi->hw_status->rx_jumbo_consumer,
6466                            tnapi->hw_status->rx_consumer,
6467                            tnapi->hw_status->rx_mini_consumer,
6468                            tnapi->hw_status->idx[0].rx_producer,
6469                            tnapi->hw_status->idx[0].tx_consumer);
6470
6471                 netdev_err(tp->dev,
6472                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6473                            i,
6474                            tnapi->last_tag, tnapi->last_irq_tag,
6475                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6476                            tnapi->rx_rcb_ptr,
6477                            tnapi->prodring.rx_std_prod_idx,
6478                            tnapi->prodring.rx_std_cons_idx,
6479                            tnapi->prodring.rx_jmb_prod_idx,
6480                            tnapi->prodring.rx_jmb_cons_idx);
6481         }
6482 }
6483
6484 /* This is called whenever we suspect that the system chipset is re-
6485  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6486  * is bogus tx completions. We try to recover by setting the
6487  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6488  * in the workqueue.
6489  */
6490 static void tg3_tx_recover(struct tg3 *tp)
6491 {
6492         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6493                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6494
6495         netdev_warn(tp->dev,
6496                     "The system may be re-ordering memory-mapped I/O "
6497                     "cycles to the network device, attempting to recover. "
6498                     "Please report the problem to the driver maintainer "
6499                     "and include system chipset information.\n");
6500
6501         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6502 }
6503
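/* The producer/consumer difference is taken modulo the ring size; for
 * example (illustrative values), with TG3_TX_RING_SIZE = 512,
 * tx_prod = 10 and tx_cons = 500, (10 - 500) & 511 = 22 descriptors are
 * in flight, leaving tx_pending - 22 slots available.
 */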
6504 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6505 {
6506         /* Tell compiler to fetch tx indices from memory. */
6507         barrier();
6508         return tnapi->tx_pending -
6509                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6510 }
6511
6512 /* Tigon3 never reports partial packet sends.  So we do not
6513  * need special logic to handle SKBs that have not had all
6514  * of their frags sent yet, like SunGEM does.
6515  */
6516 static void tg3_tx(struct tg3_napi *tnapi)
6517 {
6518         struct tg3 *tp = tnapi->tp;
6519         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6520         u32 sw_idx = tnapi->tx_cons;
6521         struct netdev_queue *txq;
6522         int index = tnapi - tp->napi;
6523         unsigned int pkts_compl = 0, bytes_compl = 0;
6524
6525         if (tg3_flag(tp, ENABLE_TSS))
6526                 index--;
6527
6528         txq = netdev_get_tx_queue(tp->dev, index);
6529
6530         while (sw_idx != hw_idx) {
6531                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6532                 struct sk_buff *skb = ri->skb;
6533                 int i, tx_bug = 0;
6534
6535                 if (unlikely(skb == NULL)) {
6536                         tg3_tx_recover(tp);
6537                         return;
6538                 }
6539
6540                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6541                         struct skb_shared_hwtstamps timestamp;
6542                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6543                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6544
6545                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6546
6547                         skb_tstamp_tx(skb, &timestamp);
6548                 }
6549
6550                 pci_unmap_single(tp->pdev,
6551                                  dma_unmap_addr(ri, mapping),
6552                                  skb_headlen(skb),
6553                                  PCI_DMA_TODEVICE);
6554
6555                 ri->skb = NULL;
6556
6557                 while (ri->fragmented) {
6558                         ri->fragmented = false;
6559                         sw_idx = NEXT_TX(sw_idx);
6560                         ri = &tnapi->tx_buffers[sw_idx];
6561                 }
6562
6563                 sw_idx = NEXT_TX(sw_idx);
6564
6565                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6566                         ri = &tnapi->tx_buffers[sw_idx];
6567                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6568                                 tx_bug = 1;
6569
6570                         pci_unmap_page(tp->pdev,
6571                                        dma_unmap_addr(ri, mapping),
6572                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6573                                        PCI_DMA_TODEVICE);
6574
6575                         while (ri->fragmented) {
6576                                 ri->fragmented = false;
6577                                 sw_idx = NEXT_TX(sw_idx);
6578                                 ri = &tnapi->tx_buffers[sw_idx];
6579                         }
6580
6581                         sw_idx = NEXT_TX(sw_idx);
6582                 }
6583
6584                 pkts_compl++;
6585                 bytes_compl += skb->len;
6586
6587                 dev_kfree_skb_any(skb);
6588
6589                 if (unlikely(tx_bug)) {
6590                         tg3_tx_recover(tp);
6591                         return;
6592                 }
6593         }
6594
6595         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6596
6597         tnapi->tx_cons = sw_idx;
6598
6599         /* Need to make the tx_cons update visible to tg3_start_xmit()
6600          * before checking for netif_queue_stopped().  Without the
6601          * memory barrier, there is a small possibility that tg3_start_xmit()
6602          * will miss it and cause the queue to be stopped forever.
6603          */
6604         smp_mb();
6605
6606         if (unlikely(netif_tx_queue_stopped(txq) &&
6607                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6608                 __netif_tx_lock(txq, smp_processor_id());
6609                 if (netif_tx_queue_stopped(txq) &&
6610                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6611                         netif_tx_wake_queue(txq);
6612                 __netif_tx_unlock(txq);
6613         }
6614 }
6615
6616 static void tg3_frag_free(bool is_frag, void *data)
6617 {
6618         if (is_frag)
6619                 skb_free_frag(data);
6620         else
6621                 kfree(data);
6622 }
6623
6624 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6625 {
6626         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6627                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6628
6629         if (!ri->data)
6630                 return;
6631
6632         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6633                          map_sz, PCI_DMA_FROMDEVICE);
6634         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6635         ri->data = NULL;
6636 }
6637
6638
6639 /* Returns size of skb allocated or < 0 on error.
6640  *
6641  * We only need to fill in the address because the other members
6642  * of the RX descriptor are invariant, see tg3_init_rings.
6643  *
6644  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6645  * posting buffers we only dirty the first cache line of the RX
6646  * descriptor (containing the address).  Whereas for the RX status
6647  * buffers the cpu only reads the last cacheline of the RX descriptor
6648  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6649  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
6650 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6651                              u32 opaque_key, u32 dest_idx_unmasked,
6652                              unsigned int *frag_size)
6653 {
6654         struct tg3_rx_buffer_desc *desc;
6655         struct ring_info *map;
6656         u8 *data;
6657         dma_addr_t mapping;
6658         int skb_size, data_size, dest_idx;
6659
6660         switch (opaque_key) {
6661         case RXD_OPAQUE_RING_STD:
6662                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6663                 desc = &tpr->rx_std[dest_idx];
6664                 map = &tpr->rx_std_buffers[dest_idx];
6665                 data_size = tp->rx_pkt_map_sz;
6666                 break;
6667
6668         case RXD_OPAQUE_RING_JUMBO:
6669                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6670                 desc = &tpr->rx_jmb[dest_idx].std;
6671                 map = &tpr->rx_jmb_buffers[dest_idx];
6672                 data_size = TG3_RX_JMB_MAP_SZ;
6673                 break;
6674
6675         default:
6676                 return -EINVAL;
6677         }
6678
6679         /* Do not overwrite any of the map or rp information
6680          * until we are sure we can commit to a new buffer.
6681          *
6682          * Callers depend upon this behavior and assume that
6683          * we leave everything unchanged if we fail.
6684          */
6685         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6686                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6687         if (skb_size <= PAGE_SIZE) {
6688                 data = netdev_alloc_frag(skb_size);
6689                 *frag_size = skb_size;
6690         } else {
6691                 data = kmalloc(skb_size, GFP_ATOMIC);
6692                 *frag_size = 0;
6693         }
6694         if (!data)
6695                 return -ENOMEM;
6696
6697         mapping = pci_map_single(tp->pdev,
6698                                  data + TG3_RX_OFFSET(tp),
6699                                  data_size,
6700                                  PCI_DMA_FROMDEVICE);
6701         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6702                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6703                 return -EIO;
6704         }
6705
6706         map->data = data;
6707         dma_unmap_addr_set(map, mapping, mapping);
6708
6709         desc->addr_hi = ((u64)mapping >> 32);
6710         desc->addr_lo = ((u64)mapping & 0xffffffff);
6711
6712         return data_size;
6713 }
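
/* As a rough sizing example (illustrative; the exact figures depend on
 * PAGE_SIZE, TG3_RX_OFFSET() and struct layout): with a 1500-byte MTU
 * the aligned data size plus the skb_shared_info trailer fits in one
 * page, so the netdev_alloc_frag() path above is taken; jumbo buffers
 * can exceed PAGE_SIZE and fall back to kmalloc().
 */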
6714
6715 /* We only need to move over in the address because the other
6716  * members of the RX descriptor are invariant.  See notes above
6717  * tg3_alloc_rx_data for full details.
6718  */
6719 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6720                            struct tg3_rx_prodring_set *dpr,
6721                            u32 opaque_key, int src_idx,
6722                            u32 dest_idx_unmasked)
6723 {
6724         struct tg3 *tp = tnapi->tp;
6725         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6726         struct ring_info *src_map, *dest_map;
6727         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6728         int dest_idx;
6729
6730         switch (opaque_key) {
6731         case RXD_OPAQUE_RING_STD:
6732                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6733                 dest_desc = &dpr->rx_std[dest_idx];
6734                 dest_map = &dpr->rx_std_buffers[dest_idx];
6735                 src_desc = &spr->rx_std[src_idx];
6736                 src_map = &spr->rx_std_buffers[src_idx];
6737                 break;
6738
6739         case RXD_OPAQUE_RING_JUMBO:
6740                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6741                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6742                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6743                 src_desc = &spr->rx_jmb[src_idx].std;
6744                 src_map = &spr->rx_jmb_buffers[src_idx];
6745                 break;
6746
6747         default:
6748                 return;
6749         }
6750
6751         dest_map->data = src_map->data;
6752         dma_unmap_addr_set(dest_map, mapping,
6753                            dma_unmap_addr(src_map, mapping));
6754         dest_desc->addr_hi = src_desc->addr_hi;
6755         dest_desc->addr_lo = src_desc->addr_lo;
6756
6757         /* Ensure that the update to the skb happens after the physical
6758          * addresses have been transferred to the new BD location.
6759          */
6760         smp_wmb();
6761
6762         src_map->data = NULL;
6763 }
6764
6765 /* The RX ring scheme is composed of multiple rings which post fresh
6766  * buffers to the chip, and one special ring the chip uses to report
6767  * status back to the host.
6768  *
6769  * The special ring reports the status of received packets to the
6770  * host.  The chip does not write into the original descriptor the
6771  * RX buffer was obtained from.  The chip simply takes the original
6772  * descriptor as provided by the host, updates the status and length
6773  * field, then writes this into the next status ring entry.
6774  *
6775  * Each ring the host uses to post buffers to the chip is described
6776  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6777  * it is first placed into the on-chip RAM.  When the packet's length
6778  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
6779  * Each TG3_BDINFO specifies a MAXLEN field; the first TG3_BDINFO
6780  * whose MAXLEN covers the new packet's length is chosen.
6781  *
6782  * The "separate ring for rx status" scheme may sound queer, but it makes
6783  * sense from a cache coherency perspective.  If only the host writes
6784  * to the buffer post rings, and only the chip writes to the rx status
6785  * rings, then cache lines never move beyond shared-modified state.
6786  * If both the host and chip were to write into the same ring, cache line
6787  * eviction could occur since both entities want it in an exclusive state.
6788  */
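/* Schematically (one possible configuration):
 *
 *	host writes:  std producer ring ---+
 *	              jumbo producer ring -+-->  chip
 *	chip writes:  rx status (return) ring -->  host
 *
 * No ring has both the host and the chip as writers, which is exactly
 * the cache-coherency property argued for above.
 */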
6789 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6790 {
6791         struct tg3 *tp = tnapi->tp;
6792         u32 work_mask, rx_std_posted = 0;
6793         u32 std_prod_idx, jmb_prod_idx;
6794         u32 sw_idx = tnapi->rx_rcb_ptr;
6795         u16 hw_idx;
6796         int received;
6797         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6798
6799         hw_idx = *(tnapi->rx_rcb_prod_idx);
6800         /*
6801          * We need to order the read of hw_idx and the read of
6802          * the opaque cookie.
6803          */
6804         rmb();
6805         work_mask = 0;
6806         received = 0;
6807         std_prod_idx = tpr->rx_std_prod_idx;
6808         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6809         while (sw_idx != hw_idx && budget > 0) {
6810                 struct ring_info *ri;
6811                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6812                 unsigned int len;
6813                 struct sk_buff *skb;
6814                 dma_addr_t dma_addr;
6815                 u32 opaque_key, desc_idx, *post_ptr;
6816                 u8 *data;
6817                 u64 tstamp = 0;
6818
6819                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6820                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6821                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6822                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6823                         dma_addr = dma_unmap_addr(ri, mapping);
6824                         data = ri->data;
6825                         post_ptr = &std_prod_idx;
6826                         rx_std_posted++;
6827                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6828                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6829                         dma_addr = dma_unmap_addr(ri, mapping);
6830                         data = ri->data;
6831                         post_ptr = &jmb_prod_idx;
6832                 } else
6833                         goto next_pkt_nopost;
6834
6835                 work_mask |= opaque_key;
6836
6837                 if (desc->err_vlan & RXD_ERR_MASK) {
6838                 drop_it:
6839                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6840                                        desc_idx, *post_ptr);
6841                 drop_it_no_recycle:
6842                         /* The card keeps track of the other statistics. */
6843                         tp->rx_dropped++;
6844                         goto next_pkt;
6845                 }
6846
6847                 prefetch(data + TG3_RX_OFFSET(tp));
6848                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6849                       ETH_FCS_LEN;
6850
6851                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6852                      RXD_FLAG_PTPSTAT_PTPV1 ||
6853                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6854                      RXD_FLAG_PTPSTAT_PTPV2) {
6855                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6856                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6857                 }
6858
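                /* Two receive paths: large packets donate the DMA buffer
                 * to the stack via build_skb() and a fresh buffer is
                 * posted in its place, while small packets are copied into
                 * a new skb so the original buffer can be recycled.
                 */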
6859                 if (len > TG3_RX_COPY_THRESH(tp)) {
6860                         int skb_size;
6861                         unsigned int frag_size;
6862
6863                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6864                                                     *post_ptr, &frag_size);
6865                         if (skb_size < 0)
6866                                 goto drop_it;
6867
6868                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6869                                          PCI_DMA_FROMDEVICE);
6870
6871                         /* Ensure that the update to the data happens
6872                          * after the usage of the old DMA mapping.
6873                          */
6874                         smp_wmb();
6875
6876                         ri->data = NULL;
6877
6878                         skb = build_skb(data, frag_size);
6879                         if (!skb) {
6880                                 tg3_frag_free(frag_size != 0, data);
6881                                 goto drop_it_no_recycle;
6882                         }
6883                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6884                 } else {
6885                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6886                                        desc_idx, *post_ptr);
6887
6888                         skb = netdev_alloc_skb(tp->dev,
6889                                                len + TG3_RAW_IP_ALIGN);
6890                         if (skb == NULL)
6891                                 goto drop_it_no_recycle;
6892
6893                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6894                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6895                         memcpy(skb->data,
6896                                data + TG3_RX_OFFSET(tp),
6897                                len);
6898                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6899                 }
6900
6901                 skb_put(skb, len);
6902                 if (tstamp)
6903                         tg3_hwclock_to_timestamp(tp, tstamp,
6904                                                  skb_hwtstamps(skb));
6905
6906                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6907                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6908                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6909                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6910                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6911                 else
6912                         skb_checksum_none_assert(skb);
6913
6914                 skb->protocol = eth_type_trans(skb, tp->dev);
6915
6916                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6917                     skb->protocol != htons(ETH_P_8021Q) &&
6918                     skb->protocol != htons(ETH_P_8021AD)) {
6919                         dev_kfree_skb_any(skb);
6920                         goto drop_it_no_recycle;
6921                 }
6922
6923                 if (desc->type_flags & RXD_FLAG_VLAN &&
6924                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6925                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6926                                                desc->err_vlan & RXD_VLAN_MASK);
6927
6928                 napi_gro_receive(&tnapi->napi, skb);
6929
6930                 received++;
6931                 budget--;
6932
6933 next_pkt:
6934                 (*post_ptr)++;
6935
6936                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6937                         tpr->rx_std_prod_idx = std_prod_idx &
6938                                                tp->rx_std_ring_mask;
6939                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6940                                      tpr->rx_std_prod_idx);
6941                         work_mask &= ~RXD_OPAQUE_RING_STD;
6942                         rx_std_posted = 0;
6943                 }
6944 next_pkt_nopost:
6945                 sw_idx++;
6946                 sw_idx &= tp->rx_ret_ring_mask;
6947
6948                 /* Refresh hw_idx to see if there is new work */
6949                 if (sw_idx == hw_idx) {
6950                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6951                         rmb();
6952                 }
6953         }
6954
6955         /* ACK the status ring. */
6956         tnapi->rx_rcb_ptr = sw_idx;
6957         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6958
6959         /* Refill RX ring(s). */
6960         if (!tg3_flag(tp, ENABLE_RSS)) {
6961                 /* Sync BD data before updating mailbox */
6962                 wmb();
6963
6964                 if (work_mask & RXD_OPAQUE_RING_STD) {
6965                         tpr->rx_std_prod_idx = std_prod_idx &
6966                                                tp->rx_std_ring_mask;
6967                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6968                                      tpr->rx_std_prod_idx);
6969                 }
6970                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6971                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6972                                                tp->rx_jmb_ring_mask;
6973                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6974                                      tpr->rx_jmb_prod_idx);
6975                 }
6976                 mmiowb();
6977         } else if (work_mask) {
6978                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6979                  * updated before the producer indices can be updated.
6980                  */
6981                 smp_wmb();
6982
6983                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6984                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6985
6986                 if (tnapi != &tp->napi[1]) {
6987                         tp->rx_refill = true;
6988                         napi_schedule(&tp->napi[1].napi);
6989                 }
6990         }
6991
6992         return received;
6993 }
6994
6995 static void tg3_poll_link(struct tg3 *tp)
6996 {
6997         /* handle link change and other phy events */
6998         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6999                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7000
7001                 if (sblk->status & SD_STATUS_LINK_CHG) {
7002                         sblk->status = SD_STATUS_UPDATED |
7003                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7004                         spin_lock(&tp->lock);
7005                         if (tg3_flag(tp, USE_PHYLIB)) {
7006                                 tw32_f(MAC_STATUS,
7007                                      (MAC_STATUS_SYNC_CHANGED |
7008                                       MAC_STATUS_CFG_CHANGED |
7009                                       MAC_STATUS_MI_COMPLETION |
7010                                       MAC_STATUS_LNKSTATE_CHANGED));
7011                                 udelay(40);
7012                         } else
7013                                 tg3_setup_phy(tp, false);
7014                         spin_unlock(&tp->lock);
7015                 }
7016         }
7017 }
7018
7019 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7020                                 struct tg3_rx_prodring_set *dpr,
7021                                 struct tg3_rx_prodring_set *spr)
7022 {
7023         u32 si, di, cpycnt, src_prod_idx;
7024         int i, err = 0;
7025
7026         while (1) {
7027                 src_prod_idx = spr->rx_std_prod_idx;
7028
7029                 /* Make sure updates to the rx_std_buffers[] entries and the
7030                  * standard producer index are seen in the correct order.
7031                  */
7032                 smp_rmb();
7033
7034                 if (spr->rx_std_cons_idx == src_prod_idx)
7035                         break;
7036
7037                 if (spr->rx_std_cons_idx < src_prod_idx)
7038                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7039                 else
7040                         cpycnt = tp->rx_std_ring_mask + 1 -
7041                                  spr->rx_std_cons_idx;
7042
7043                 cpycnt = min(cpycnt,
7044                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
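                /* Example (illustrative values): with rx_std_ring_mask =
                 * 511, cons_idx = 500 and src_prod_idx = 10, the first
                 * pass copies 512 - 500 = 12 entries up to the wrap point
                 * and the loop comes back around for the remaining 10
                 * (destination space permitting).
                 */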
7045
7046                 si = spr->rx_std_cons_idx;
7047                 di = dpr->rx_std_prod_idx;
7048
7049                 for (i = di; i < di + cpycnt; i++) {
7050                         if (dpr->rx_std_buffers[i].data) {
7051                                 cpycnt = i - di;
7052                                 err = -ENOSPC;
7053                                 break;
7054                         }
7055                 }
7056
7057                 if (!cpycnt)
7058                         break;
7059
7060                 /* Ensure that updates to the rx_std_buffers ring and the
7061                  * shadowed hardware producer ring from tg3_recycle_rx() are
7062                  * ordered correctly WRT the data check above.
7063                  */
7064                 smp_rmb();
7065
7066                 memcpy(&dpr->rx_std_buffers[di],
7067                        &spr->rx_std_buffers[si],
7068                        cpycnt * sizeof(struct ring_info));
7069
7070                 for (i = 0; i < cpycnt; i++, di++, si++) {
7071                         struct tg3_rx_buffer_desc *sbd, *dbd;
7072                         sbd = &spr->rx_std[si];
7073                         dbd = &dpr->rx_std[di];
7074                         dbd->addr_hi = sbd->addr_hi;
7075                         dbd->addr_lo = sbd->addr_lo;
7076                 }
7077
7078                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7079                                        tp->rx_std_ring_mask;
7080                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7081                                        tp->rx_std_ring_mask;
7082         }
7083
7084         while (1) {
7085                 src_prod_idx = spr->rx_jmb_prod_idx;
7086
7087                 /* Make sure updates to the rx_jmb_buffers[] entries and
7088                  * the jumbo producer index are seen in the correct order.
7089                  */
7090                 smp_rmb();
7091
7092                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7093                         break;
7094
7095                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7096                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7097                 else
7098                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7099                                  spr->rx_jmb_cons_idx;
7100
7101                 cpycnt = min(cpycnt,
7102                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7103
7104                 si = spr->rx_jmb_cons_idx;
7105                 di = dpr->rx_jmb_prod_idx;
7106
7107                 for (i = di; i < di + cpycnt; i++) {
7108                         if (dpr->rx_jmb_buffers[i].data) {
7109                                 cpycnt = i - di;
7110                                 err = -ENOSPC;
7111                                 break;
7112                         }
7113                 }
7114
7115                 if (!cpycnt)
7116                         break;
7117
7118                 /* Ensure that updates to the rx_jmb_buffers ring and the
7119                  * shadowed hardware producer ring from tg3_recycle_skb() are
7120                  * ordered correctly WRT the skb check above.
7121                  */
7122                 smp_rmb();
7123
7124                 memcpy(&dpr->rx_jmb_buffers[di],
7125                        &spr->rx_jmb_buffers[si],
7126                        cpycnt * sizeof(struct ring_info));
7127
7128                 for (i = 0; i < cpycnt; i++, di++, si++) {
7129                         struct tg3_rx_buffer_desc *sbd, *dbd;
7130                         sbd = &spr->rx_jmb[si].std;
7131                         dbd = &dpr->rx_jmb[di].std;
7132                         dbd->addr_hi = sbd->addr_hi;
7133                         dbd->addr_lo = sbd->addr_lo;
7134                 }
7135
7136                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7137                                        tp->rx_jmb_ring_mask;
7138                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7139                                        tp->rx_jmb_ring_mask;
7140         }
7141
7142         return err;
7143 }
7144
7145 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7146 {
7147         struct tg3 *tp = tnapi->tp;
7148
7149         /* run TX completion thread */
7150         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7151                 tg3_tx(tnapi);
7152                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7153                         return work_done;
7154         }
7155
7156         if (!tnapi->rx_rcb_prod_idx)
7157                 return work_done;
7158
7159         /* run RX thread, within the bounds set by NAPI.
7160          * All RX "locking" is done by ensuring outside
7161          * code synchronizes with tg3->napi.poll()
7162          */
7163         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7164                 work_done += tg3_rx(tnapi, budget - work_done);
7165
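             /* With RSS, vector 1 additionally drains every vector's
              * per-queue producer ring back into the single ring the
              * hardware refills from (owned by vector 0), then kicks
              * the standard/jumbo producer mailboxes if anything moved.
              */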
7166         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7167                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7168                 int i, err = 0;
7169                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7170                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7171
7172                 tp->rx_refill = false;
7173                 for (i = 1; i <= tp->rxq_cnt; i++)
7174                         err |= tg3_rx_prodring_xfer(tp, dpr,
7175                                                     &tp->napi[i].prodring);
7176
7177                 wmb();
7178
7179                 if (std_prod_idx != dpr->rx_std_prod_idx)
7180                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7181                                      dpr->rx_std_prod_idx);
7182
7183                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7184                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7185                                      dpr->rx_jmb_prod_idx);
7186
7187                 mmiowb();
7188
7189                 if (err)
7190                         tw32_f(HOSTCC_MODE, tp->coal_now);
7191         }
7192
7193         return work_done;
7194 }
7195
7196 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7197 {
7198         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7199                 schedule_work(&tp->reset_task);
7200 }
7201
7202 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7203 {
7204         cancel_work_sync(&tp->reset_task);
7205         tg3_flag_clear(tp, RESET_TASK_PENDING);
7206         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7207 }
7208
7209 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7210 {
7211         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7212         struct tg3 *tp = tnapi->tp;
7213         int work_done = 0;
7214         struct tg3_hw_status *sblk = tnapi->hw_status;
7215
7216         while (1) {
7217                 work_done = tg3_poll_work(tnapi, work_done, budget);
7218
7219                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7220                         goto tx_recovery;
7221
7222                 if (unlikely(work_done >= budget))
7223                         break;
7224
7225                 /* tnapi->last_tag is used when reenabling interrupts
7226                  * below to tell the hw how much work has been processed,
7227                  * so we must read it before checking for more work.
7228                  */
7229                 tnapi->last_tag = sblk->status_tag;
7230                 tnapi->last_irq_tag = tnapi->last_tag;
7231                 rmb();
7232
7233                 /* check for RX/TX work to do */
7234                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7235                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7236
7237                         /* This test is not race-free, but looping again
7238                          * here reduces the number of interrupts taken.
7239                          */
7240                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7241                                 continue;
7242
7243                         napi_complete_done(napi, work_done);
7244                         /* Reenable interrupts. */
7245                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7246
7247                         /* This test here is synchronized by napi_schedule()
7248                          * and napi_complete() to close the race condition.
7249                          */
7250                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7251                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7252                                                   HOSTCC_MODE_ENABLE |
7253                                                   tnapi->coal_now);
7254                         }
7255                         mmiowb();
7256                         break;
7257                 }
7258         }
7259
7260         return work_done;
7261
7262 tx_recovery:
7263         /* work_done is guaranteed to be less than budget. */
7264         napi_complete(napi);
7265         tg3_reset_task_schedule(tp);
7266         return work_done;
7267 }
7268
7269 static void tg3_process_error(struct tg3 *tp)
7270 {
7271         u32 val;
7272         bool real_error = false;
7273
7274         if (tg3_flag(tp, ERROR_PROCESSED))
7275                 return;
7276
7277         /* Check Flow Attention register */
7278         val = tr32(HOSTCC_FLOW_ATTN);
7279         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7280                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7281                 real_error = true;
7282         }
7283
7284         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7285                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7286                 real_error = true;
7287         }
7288
7289         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7290                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7291                 real_error = true;
7292         }
7293
7294         if (!real_error)
7295                 return;
7296
7297         tg3_dump_state(tp);
7298
7299         tg3_flag_set(tp, ERROR_PROCESSED);
7300         tg3_reset_task_schedule(tp);
7301 }
7302
7303 static int tg3_poll(struct napi_struct *napi, int budget)
7304 {
7305         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7306         struct tg3 *tp = tnapi->tp;
7307         int work_done = 0;
7308         struct tg3_hw_status *sblk = tnapi->hw_status;
7309
7310         while (1) {
7311                 if (sblk->status & SD_STATUS_ERROR)
7312                         tg3_process_error(tp);
7313
7314                 tg3_poll_link(tp);
7315
7316                 work_done = tg3_poll_work(tnapi, work_done, budget);
7317
7318                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7319                         goto tx_recovery;
7320
7321                 if (unlikely(work_done >= budget))
7322                         break;
7323
7324                 if (tg3_flag(tp, TAGGED_STATUS)) {
7325                         /* tnapi->last_tag is used in tg3_int_reenable() below
7326                          * to tell the hw how much work has been processed,
7327                          * so we must read it before checking for more work.
7328                          */
7329                         tnapi->last_tag = sblk->status_tag;
7330                         tnapi->last_irq_tag = tnapi->last_tag;
7331                         rmb();
7332                 } else
7333                         sblk->status &= ~SD_STATUS_UPDATED;
7334
7335                 if (likely(!tg3_has_work(tnapi))) {
7336                         napi_complete_done(napi, work_done);
7337                         tg3_int_reenable(tnapi);
7338                         break;
7339                 }
7340         }
7341
7342         return work_done;
7343
7344 tx_recovery:
7345         /* work_done is guaranteed to be less than budget. */
7346         napi_complete(napi);
7347         tg3_reset_task_schedule(tp);
7348         return work_done;
7349 }
7350
7351 static void tg3_napi_disable(struct tg3 *tp)
7352 {
7353         int i;
7354
7355         for (i = tp->irq_cnt - 1; i >= 0; i--)
7356                 napi_disable(&tp->napi[i].napi);
7357 }
7358
7359 static void tg3_napi_enable(struct tg3 *tp)
7360 {
7361         int i;
7362
7363         for (i = 0; i < tp->irq_cnt; i++)
7364                 napi_enable(&tp->napi[i].napi);
7365 }
7366
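     /* Vector 0 polls with tg3_poll(), which also handles link changes
      * and error status; any extra MSI-X vectors use the leaner
      * tg3_poll_msix().  64 is the conventional NAPI weight.
      */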
7367 static void tg3_napi_init(struct tg3 *tp)
7368 {
7369         int i;
7370
7371         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7372         for (i = 1; i < tp->irq_cnt; i++)
7373                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7374 }
7375
7376 static void tg3_napi_fini(struct tg3 *tp)
7377 {
7378         int i;
7379
7380         for (i = 0; i < tp->irq_cnt; i++)
7381                 netif_napi_del(&tp->napi[i].napi);
7382 }
7383
7384 static inline void tg3_netif_stop(struct tg3 *tp)
7385 {
7386         netif_trans_update(tp->dev);    /* prevent tx timeout */
7387         tg3_napi_disable(tp);
7388         netif_carrier_off(tp->dev);
7389         netif_tx_disable(tp->dev);
7390 }
7391
7392 /* tp->lock must be held */
7393 static inline void tg3_netif_start(struct tg3 *tp)
7394 {
7395         tg3_ptp_resume(tp);
7396
7397         /* NOTE: unconditional netif_tx_wake_all_queues is only
7398          * appropriate so long as all callers are assured to
7399          * have free tx slots (such as after tg3_init_hw)
7400          */
7401         netif_tx_wake_all_queues(tp->dev);
7402
7403         if (tp->link_up)
7404                 netif_carrier_on(tp->dev);
7405
7406         tg3_napi_enable(tp);
7407         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7408         tg3_enable_ints(tp);
7409 }
7410
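     /* Wait for all in-flight interrupt handlers to finish.  irq_sync
      * is set first so handlers that race with us bail out through
      * tg3_irq_sync() instead of scheduling NAPI; tp->lock is dropped
      * across synchronize_irq(), which may sleep.
      */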
7411 static void tg3_irq_quiesce(struct tg3 *tp)
7412         __releases(tp->lock)
7413         __acquires(tp->lock)
7414 {
7415         int i;
7416
7417         BUG_ON(tp->irq_sync);
7418
7419         tp->irq_sync = 1;
7420         smp_mb();
7421
7422         spin_unlock_bh(&tp->lock);
7423
7424         for (i = 0; i < tp->irq_cnt; i++)
7425                 synchronize_irq(tp->napi[i].irq_vec);
7426
7427         spin_lock_bh(&tp->lock);
7428 }
7429
7430 /* Fully shut down all tg3 driver activity elsewhere in the system.
7431  * If irq_sync is non-zero, the IRQ handlers are synchronized with as
7432  * well.  This is usually only necessary when shutting down the
7433  * device.
7434  */
7435 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7436 {
7437         spin_lock_bh(&tp->lock);
7438         if (irq_sync)
7439                 tg3_irq_quiesce(tp);
7440 }
7441
7442 static inline void tg3_full_unlock(struct tg3 *tp)
7443 {
7444         spin_unlock_bh(&tp->lock);
7445 }
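
     /* Typical usage (sketch):
      *
      *     tg3_full_lock(tp, 1);      <-- irq_sync=1 also quiesces IRQs
      *     ... reprogram the hardware ...
      *     tg3_full_unlock(tp);
      *
      * irq_sync is only needed on paths that stop or reset the device.
      */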
7446
7447 /* One-shot MSI handler - the chip automatically disables the
7448  * interrupt after sending the MSI, so the driver doesn't have to.
7449  */
7450 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7451 {
7452         struct tg3_napi *tnapi = dev_id;
7453         struct tg3 *tp = tnapi->tp;
7454
7455         prefetch(tnapi->hw_status);
7456         if (tnapi->rx_rcb)
7457                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7458
7459         if (likely(!tg3_irq_sync(tp)))
7460                 napi_schedule(&tnapi->napi);
7461
7462         return IRQ_HANDLED;
7463 }
7464
7465 /* MSI ISR - No need to check for interrupt sharing and no need to
7466  * flush status block and interrupt mailbox. PCI ordering rules
7467  * guarantee that MSI will arrive after the status block.
7468  */
7469 static irqreturn_t tg3_msi(int irq, void *dev_id)
7470 {
7471         struct tg3_napi *tnapi = dev_id;
7472         struct tg3 *tp = tnapi->tp;
7473
7474         prefetch(tnapi->hw_status);
7475         if (tnapi->rx_rcb)
7476                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7477         /*
7478          * Writing any value to intr-mbox-0 clears PCI INTA# and
7479          * chip-internal interrupt pending events.
7480          * Writing non-zero to intr-mbox-0 additionally tells the
7481          * NIC to stop sending us irqs, engaging "in-intr-handler"
7482          * event coalescing.
7483          */
7484         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7485         if (likely(!tg3_irq_sync(tp)))
7486                 napi_schedule(&tnapi->napi);
7487
7488         return IRQ_RETVAL(1);
7489 }
7490
7491 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7492 {
7493         struct tg3_napi *tnapi = dev_id;
7494         struct tg3 *tp = tnapi->tp;
7495         struct tg3_hw_status *sblk = tnapi->hw_status;
7496         unsigned int handled = 1;
7497
7498         /* In INTx mode, it is possible for the interrupt to arrive at
7499          * the CPU before the status block that was posted prior to it.
7500          * Reading the PCI State register will confirm whether the
7501          * interrupt is ours and will flush the status block.
7502          */
7503         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7504                 if (tg3_flag(tp, CHIP_RESETTING) ||
7505                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7506                         handled = 0;
7507                         goto out;
7508                 }
7509         }
7510
7511         /*
7512          * Writing any value to intr-mbox-0 clears PCI INTA# and
7513          * chip-internal interrupt pending events.
7514          * Writing non-zero to intr-mbox-0 additionally tells the
7515          * NIC to stop sending us irqs, engaging "in-intr-handler"
7516          * event coalescing.
7517          *
7518          * Flush the mailbox to de-assert the IRQ immediately to prevent
7519          * spurious interrupts.  The flush impacts performance but
7520          * excessive spurious interrupts can be worse in some cases.
7521          */
7522         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7523         if (tg3_irq_sync(tp))
7524                 goto out;
7525         sblk->status &= ~SD_STATUS_UPDATED;
7526         if (likely(tg3_has_work(tnapi))) {
7527                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7528                 napi_schedule(&tnapi->napi);
7529         } else {
7530                 /* No work; a shared interrupt, perhaps?  Re-enable
7531                  * interrupts, and flush that PCI write.
7532                  */
7533                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7534                                0x00000000);
7535         }
7536 out:
7537         return IRQ_RETVAL(handled);
7538 }
7539
7540 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7541 {
7542         struct tg3_napi *tnapi = dev_id;
7543         struct tg3 *tp = tnapi->tp;
7544         struct tg3_hw_status *sblk = tnapi->hw_status;
7545         unsigned int handled = 1;
7546
7547         /* In INTx mode, it is possible for the interrupt to arrive at
7548          * the CPU before the status block that was posted prior to it.
7549          * Reading the PCI State register will confirm whether the
7550          * interrupt is ours and will flush the status block.
7551          */
7552         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7553                 if (tg3_flag(tp, CHIP_RESETTING) ||
7554                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7555                         handled = 0;
7556                         goto out;
7557                 }
7558         }
7559
7560         /*
7561          * Writing any value to intr-mbox-0 clears PCI INTA# and
7562          * chip-internal interrupt pending events.
7563          * Writing non-zero to intr-mbox-0 additionally tells the
7564          * NIC to stop sending us irqs, engaging "in-intr-handler"
7565          * event coalescing.
7566          *
7567          * Flush the mailbox to de-assert the IRQ immediately to prevent
7568          * spurious interrupts.  The flush impacts performance but
7569          * excessive spurious interrupts can be worse in some cases.
7570          */
7571         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7572
7573         /*
7574          * In a shared interrupt configuration, sometimes other devices'
7575          * interrupts will scream.  We record the current status tag here
7576          * so that the above check can report that the screaming interrupts
7577          * are unhandled.  Eventually they will be silenced.
7578          */
7579         tnapi->last_irq_tag = sblk->status_tag;
7580
7581         if (tg3_irq_sync(tp))
7582                 goto out;
7583
7584         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7585
7586         napi_schedule(&tnapi->napi);
7587
7588 out:
7589         return IRQ_RETVAL(handled);
7590 }
7591
7592 /* ISR for interrupt test */
7593 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7594 {
7595         struct tg3_napi *tnapi = dev_id;
7596         struct tg3 *tp = tnapi->tp;
7597         struct tg3_hw_status *sblk = tnapi->hw_status;
7598
7599         if ((sblk->status & SD_STATUS_UPDATED) ||
7600             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7601                 tg3_disable_ints(tp);
7602                 return IRQ_RETVAL(1);
7603         }
7604         return IRQ_RETVAL(0);
7605 }
7606
7607 #ifdef CONFIG_NET_POLL_CONTROLLER
7608 static void tg3_poll_controller(struct net_device *dev)
7609 {
7610         int i;
7611         struct tg3 *tp = netdev_priv(dev);
7612
7613         if (tg3_irq_sync(tp))
7614                 return;
7615
7616         for (i = 0; i < tp->irq_cnt; i++)
7617                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7618 }
7619 #endif
7620
7621 static void tg3_tx_timeout(struct net_device *dev)
7622 {
7623         struct tg3 *tp = netdev_priv(dev);
7624
7625         if (netif_msg_tx_err(tp)) {
7626                 netdev_err(dev, "transmit timed out, resetting\n");
7627                 tg3_dump_state(tp);
7628         }
7629
7630         tg3_reset_task_schedule(tp);
7631 }
7632
7633 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7634 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7635 {
7636         u32 base = (u32) mapping & 0xffffffff;
7637
7638         return base + len + 8 < base;
7639 }
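
     /* Example (hypothetical values): base = 0xffffff00 and len = 0x200
      * give base + len + 8 = 0x100000108, which truncates to 0x108 in
      * 32-bit arithmetic; 0x108 < base, so a 4GB boundary is crossed.
      */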
7640
7641 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7642  * of any 4GB boundaries: 4G, 8G, etc.
7643  */
7644 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7645                                            u32 len, u32 mss)
7646 {
7647         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7648                 u32 base = (u32) mapping & 0xffffffff;
7649
7650                 return ((base + len + (mss & 0x3fff)) < base);
7651         }
7652         return 0;
7653 }
7654
7655 /* Test for DMA addresses > 40-bit */
7656 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7657                                           int len)
7658 {
7659 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7660         if (tg3_flag(tp, 40BIT_DMA_BUG))
7661                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7662         return 0;
7663 #else
7664         return 0;
7665 #endif
7666 }
7667
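     /* Fill one hardware tx buffer descriptor: the 64-bit DMA address
      * is split across addr_hi/addr_lo, the length and flags share one
      * 32-bit word, and the MSS and VLAN tag share another.
      */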
7668 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7669                                  dma_addr_t mapping, u32 len, u32 flags,
7670                                  u32 mss, u32 vlan)
7671 {
7672         txbd->addr_hi = ((u64) mapping >> 32);
7673         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7674         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7675         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7676 }
7677
7678 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7679                             dma_addr_t map, u32 len, u32 flags,
7680                             u32 mss, u32 vlan)
7681 {
7682         struct tg3 *tp = tnapi->tp;
7683         bool hwbug = false;
7684
7685         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7686                 hwbug = true;
7687
7688         if (tg3_4g_overflow_test(map, len))
7689                 hwbug = true;
7690
7691         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7692                 hwbug = true;
7693
7694         if (tg3_40bit_overflow_test(tp, map, len))
7695                 hwbug = true;
7696
7697         if (tp->dma_limit) {
7698                 u32 prvidx = *entry;
7699                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7700                 while (len > tp->dma_limit && *budget) {
7701                         u32 frag_len = tp->dma_limit;
7702                         len -= tp->dma_limit;
7703
7704                         /* Avoid the 8-byte DMA problem */
7705                         if (len <= 8) {
7706                                 len += tp->dma_limit / 2;
7707                                 frag_len = tp->dma_limit / 2;
7708                         }
7709
7710                         tnapi->tx_buffers[*entry].fragmented = true;
7711
7712                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7713                                       frag_len, tmp_flag, mss, vlan);
7714                         *budget -= 1;
7715                         prvidx = *entry;
7716                         *entry = NEXT_TX(*entry);
7717
7718                         map += frag_len;
7719                 }
7720
7721                 if (len) {
7722                         if (*budget) {
7723                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7724                                               len, flags, mss, vlan);
7725                                 *budget -= 1;
7726                                 *entry = NEXT_TX(*entry);
7727                         } else {
7728                                 hwbug = true;
7729                                 tnapi->tx_buffers[prvidx].fragmented = false;
7730                         }
7731                 }
7732         } else {
7733                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7734                               len, flags, mss, vlan);
7735                 *entry = NEXT_TX(*entry);
7736         }
7737
7738         return hwbug;
7739 }
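
     /* Illustration of the dma_limit splitting above (hypothetical
      * numbers): with dma_limit = 4096 and len = 8196, the first pass
      * emits a 4096-byte BD; the second pass would leave a 4-byte tail,
      * so it emits a 2048-byte BD instead and the remaining 2052 bytes
      * go out as the final BD, avoiding the <= 8 byte DMA erratum.
      */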
7740
7741 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7742 {
7743         int i;
7744         struct sk_buff *skb;
7745         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7746
7747         skb = txb->skb;
7748         txb->skb = NULL;
7749
7750         pci_unmap_single(tnapi->tp->pdev,
7751                          dma_unmap_addr(txb, mapping),
7752                          skb_headlen(skb),
7753                          PCI_DMA_TODEVICE);
7754
7755         while (txb->fragmented) {
7756                 txb->fragmented = false;
7757                 entry = NEXT_TX(entry);
7758                 txb = &tnapi->tx_buffers[entry];
7759         }
7760
7761         for (i = 0; i <= last; i++) {
7762                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7763
7764                 entry = NEXT_TX(entry);
7765                 txb = &tnapi->tx_buffers[entry];
7766
7767                 pci_unmap_page(tnapi->tp->pdev,
7768                                dma_unmap_addr(txb, mapping),
7769                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7770
7771                 while (txb->fragmented) {
7772                         txb->fragmented = false;
7773                         entry = NEXT_TX(entry);
7774                         txb = &tnapi->tx_buffers[entry];
7775                 }
7776         }
7777 }
7778
7779 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7780 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7781                                        struct sk_buff **pskb,
7782                                        u32 *entry, u32 *budget,
7783                                        u32 base_flags, u32 mss, u32 vlan)
7784 {
7785         struct tg3 *tp = tnapi->tp;
7786         struct sk_buff *new_skb, *skb = *pskb;
7787         dma_addr_t new_addr = 0;
7788         int ret = 0;
7789
7790         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7791                 new_skb = skb_copy(skb, GFP_ATOMIC);
7792         else {
7793                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7794
7795                 new_skb = skb_copy_expand(skb,
7796                                           skb_headroom(skb) + more_headroom,
7797                                           skb_tailroom(skb), GFP_ATOMIC);
7798         }
7799
7800         if (!new_skb) {
7801                 ret = -1;
7802         } else {
7803                 /* New SKB is guaranteed to be linear. */
7804                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7805                                           PCI_DMA_TODEVICE);
7806                 /* Make sure the mapping succeeded */
7807                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7808                         dev_kfree_skb_any(new_skb);
7809                         ret = -1;
7810                 } else {
7811                         u32 save_entry = *entry;
7812
7813                         base_flags |= TXD_FLAG_END;
7814
7815                         tnapi->tx_buffers[*entry].skb = new_skb;
7816                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7817                                            mapping, new_addr);
7818
7819                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7820                                             new_skb->len, base_flags,
7821                                             mss, vlan)) {
7822                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7823                                 dev_kfree_skb_any(new_skb);
7824                                 ret = -1;
7825                         }
7826                 }
7827         }
7828
7829         dev_kfree_skb_any(skb);
7830         *pskb = new_skb;
7831         return ret;
7832 }
7833
7834 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7835 {
7836         /* Check if we will never have enough descriptors,
7837          * as gso_segs can exceed the current ring size.
7838          */
7839         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7840 }
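
     /* The divisor of 3 above mirrors frag_cnt_est in tg3_tso_bug()
      * below, which budgets a worst case of three descriptors per GSO
      * segment; a packet failing this check could never fit, even in
      * an empty ring.
      */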
7841
7842 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7843
7844 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7845  * indicated in tg3_tx_frag_set()
7846  */
7847 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7848                        struct netdev_queue *txq, struct sk_buff *skb)
7849 {
7850         struct sk_buff *segs, *nskb;
7851         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7852
7853         /* Estimate the number of fragments in the worst case */
7854         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7855                 netif_tx_stop_queue(txq);
7856
7857                 /* netif_tx_stop_queue() must be done before checking
7858                  * the tx index in tg3_tx_avail() below, because in
7859                  * tg3_tx(), we update tx index before checking for
7860                  * netif_tx_queue_stopped().
7861                  */
7862                 smp_mb();
7863                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7864                         return NETDEV_TX_BUSY;
7865
7866                 netif_tx_wake_queue(txq);
7867         }
7868
7869         segs = skb_gso_segment(skb, tp->dev->features &
7870                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7871         if (IS_ERR(segs) || !segs)
7872                 goto tg3_tso_bug_end;
7873
7874         do {
7875                 nskb = segs;
7876                 segs = segs->next;
7877                 nskb->next = NULL;
7878                 tg3_start_xmit(nskb, tp->dev);
7879         } while (segs);
7880
7881 tg3_tso_bug_end:
7882         dev_kfree_skb_any(skb);
7883
7884         return NETDEV_TX_OK;
7885 }
7886
7887 /* hard_start_xmit for all devices */
7888 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7889 {
7890         struct tg3 *tp = netdev_priv(dev);
7891         u32 len, entry, base_flags, mss, vlan = 0;
7892         u32 budget;
7893         int i = -1, would_hit_hwbug;
7894         dma_addr_t mapping;
7895         struct tg3_napi *tnapi;
7896         struct netdev_queue *txq;
7897         unsigned int last;
7898         struct iphdr *iph = NULL;
7899         struct tcphdr *tcph = NULL;
7900         __sum16 tcp_csum = 0, ip_csum = 0;
7901         __be16 ip_tot_len = 0;
7902
7903         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7904         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7905         if (tg3_flag(tp, ENABLE_TSS))
7906                 tnapi++;
7907
7908         budget = tg3_tx_avail(tnapi);
7909
7910         /* We are running in BH disabled context with netif_tx_lock
7911          * and TX reclaim runs via tp->napi.poll inside of a software
7912          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7913          * no IRQ context deadlocks to worry about either.  Rejoice!
7914          */
7915         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7916                 if (!netif_tx_queue_stopped(txq)) {
7917                         netif_tx_stop_queue(txq);
7918
7919                         /* This is a hard error, log it. */
7920                         netdev_err(dev,
7921                                    "BUG! Tx Ring full when queue awake!\n");
7922                 }
7923                 return NETDEV_TX_BUSY;
7924         }
7925
7926         entry = tnapi->tx_prod;
7927         base_flags = 0;
7928
7929         mss = skb_shinfo(skb)->gso_size;
7930         if (mss) {
7931                 u32 tcp_opt_len, hdr_len;
7932
7933                 if (skb_cow_head(skb, 0))
7934                         goto drop;
7935
7936                 iph = ip_hdr(skb);
7937                 tcp_opt_len = tcp_optlen(skb);
7938
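                     /* hdr_len counts the IP header plus the TCP header
                      * (including options), i.e. everything between the
                      * end of the Ethernet header and the start of the
                      * TCP payload.
                      */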
7939                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7940
7941                 /* HW/FW can not correctly segment packets that have been
7942                  * vlan encapsulated.
7943                  */
7944                 if (skb->protocol == htons(ETH_P_8021Q) ||
7945                     skb->protocol == htons(ETH_P_8021AD)) {
7946                         if (tg3_tso_bug_gso_check(tnapi, skb))
7947                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7948                         goto drop;
7949                 }
7950
7951                 if (!skb_is_gso_v6(skb)) {
7952                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7953                             tg3_flag(tp, TSO_BUG)) {
7954                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7955                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7956                                 goto drop;
7957                         }
7958                         ip_csum = iph->check;
7959                         ip_tot_len = iph->tot_len;
7960                         iph->check = 0;
7961                         iph->tot_len = htons(mss + hdr_len);
7962                 }
7963
7964                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7965                                TXD_FLAG_CPU_POST_DMA);
7966
7967                 tcph = tcp_hdr(skb);
7968                 tcp_csum = tcph->check;
7969
7970                 if (tg3_flag(tp, HW_TSO_1) ||
7971                     tg3_flag(tp, HW_TSO_2) ||
7972                     tg3_flag(tp, HW_TSO_3)) {
7973                         tcph->check = 0;
7974                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7975                 } else {
7976                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7977                                                          0, IPPROTO_TCP, 0);
7978                 }
7979
7980                 if (tg3_flag(tp, HW_TSO_3)) {
7981                         mss |= (hdr_len & 0xc) << 12;
7982                         if (hdr_len & 0x10)
7983                                 base_flags |= 0x00000010;
7984                         base_flags |= (hdr_len & 0x3e0) << 5;
7985                 } else if (tg3_flag(tp, HW_TSO_2))
7986                         mss |= hdr_len << 9;
7987                 else if (tg3_flag(tp, HW_TSO_1) ||
7988                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7989                         if (tcp_opt_len || iph->ihl > 5) {
7990                                 int tsflags;
7991
7992                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7993                                 mss |= (tsflags << 11);
7994                         }
7995                 } else {
7996                         if (tcp_opt_len || iph->ihl > 5) {
7997                                 int tsflags;
7998
7999                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8000                                 base_flags |= tsflags << 12;
8001                         }
8002                 }
8003         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8004                 /* HW/FW can not correctly checksum packets that have been
8005                  * vlan encapsulated.
8006                  */
8007                 if (skb->protocol == htons(ETH_P_8021Q) ||
8008                     skb->protocol == htons(ETH_P_8021AD)) {
8009                         if (skb_checksum_help(skb))
8010                                 goto drop;
8011                 } else  {
8012                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8013                 }
8014         }
8015
8016         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8017             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8018                 base_flags |= TXD_FLAG_JMB_PKT;
8019
8020         if (skb_vlan_tag_present(skb)) {
8021                 base_flags |= TXD_FLAG_VLAN;
8022                 vlan = skb_vlan_tag_get(skb);
8023         }
8024
8025         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8026             tg3_flag(tp, TX_TSTAMP_EN)) {
8027                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8028                 base_flags |= TXD_FLAG_HWTSTAMP;
8029         }
8030
8031         len = skb_headlen(skb);
8032
8033         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8034         if (pci_dma_mapping_error(tp->pdev, mapping))
8035                 goto drop;
8036
8037
8038         tnapi->tx_buffers[entry].skb = skb;
8039         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8040
8041         would_hit_hwbug = 0;
8042
8043         if (tg3_flag(tp, 5701_DMA_BUG))
8044                 would_hit_hwbug = 1;
8045
8046         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8047                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8048                             mss, vlan)) {
8049                 would_hit_hwbug = 1;
8050         } else if (skb_shinfo(skb)->nr_frags > 0) {
8051                 u32 tmp_mss = mss;
8052
8053                 if (!tg3_flag(tp, HW_TSO_1) &&
8054                     !tg3_flag(tp, HW_TSO_2) &&
8055                     !tg3_flag(tp, HW_TSO_3))
8056                         tmp_mss = 0;
8057
8058                 /* Now loop through additional data
8059                  * fragments, and queue them.
8060                  */
8061                 last = skb_shinfo(skb)->nr_frags - 1;
8062                 for (i = 0; i <= last; i++) {
8063                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8064
8065                         len = skb_frag_size(frag);
8066                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8067                                                    len, DMA_TO_DEVICE);
8068
8069                         tnapi->tx_buffers[entry].skb = NULL;
8070                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8071                                            mapping);
8072                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8073                                 goto dma_error;
8074
8075                         if (!budget ||
8076                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8077                                             len, base_flags |
8078                                             ((i == last) ? TXD_FLAG_END : 0),
8079                                             tmp_mss, vlan)) {
8080                                 would_hit_hwbug = 1;
8081                                 break;
8082                         }
8083                 }
8084         }
8085
8086         if (would_hit_hwbug) {
8087                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8088
8089                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8090                         /* If it's a TSO packet, do GSO instead of
8091                          * allocating and copying to a large linear SKB
8092                          */
8093                         if (ip_tot_len) {
8094                                 iph->check = ip_csum;
8095                                 iph->tot_len = ip_tot_len;
8096                         }
8097                         tcph->check = tcp_csum;
8098                         return tg3_tso_bug(tp, tnapi, txq, skb);
8099                 }
8100
8101                 /* If the workaround fails due to memory/mapping
8102                  * failure, silently drop this packet.
8103                  */
8104                 entry = tnapi->tx_prod;
8105                 budget = tg3_tx_avail(tnapi);
8106                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8107                                                 base_flags, mss, vlan))
8108                         goto drop_nofree;
8109         }
8110
8111         skb_tx_timestamp(skb);
8112         netdev_tx_sent_queue(txq, skb->len);
8113
8114         /* Sync BD data before updating mailbox */
8115         wmb();
8116
8117         tnapi->tx_prod = entry;
8118         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8119                 netif_tx_stop_queue(txq);
8120
8121                 /* netif_tx_stop_queue() must be done before checking
8122                  * the tx index in tg3_tx_avail() below, because in
8123                  * tg3_tx(), we update tx index before checking for
8124                  * netif_tx_queue_stopped().
8125                  */
8126                 smp_mb();
8127                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8128                         netif_tx_wake_queue(txq);
8129         }
8130
8131         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8132                 /* Packets are ready, update Tx producer idx on card. */
8133                 tw32_tx_mbox(tnapi->prodmbox, entry);
8134                 mmiowb();
8135         }
8136
8137         return NETDEV_TX_OK;
8138
8139 dma_error:
8140         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8141         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8142 drop:
8143         dev_kfree_skb_any(skb);
8144 drop_nofree:
8145         tp->tx_dropped++;
8146         return NETDEV_TX_OK;
8147 }
8148
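     /* Loop the MAC's tx output back into its own rx path.  The port
      * mode is forced to MII or GMII to match the PHY type; used by
      * the NETIF_F_LOOPBACK handling in tg3_set_loopback() below.
      */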
8149 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8150 {
8151         if (enable) {
8152                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8153                                   MAC_MODE_PORT_MODE_MASK);
8154
8155                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8156
8157                 if (!tg3_flag(tp, 5705_PLUS))
8158                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8159
8160                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8161                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8162                 else
8163                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8164         } else {
8165                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8166
8167                 if (tg3_flag(tp, 5705_PLUS) ||
8168                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8169                     tg3_asic_rev(tp) == ASIC_REV_5700)
8170                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8171         }
8172
8173         tw32(MAC_MODE, tp->mac_mode);
8174         udelay(40);
8175 }
8176
8177 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8178 {
8179         u32 val, bmcr, mac_mode, ptest = 0;
8180
8181         tg3_phy_toggle_apd(tp, false);
8182         tg3_phy_toggle_automdix(tp, false);
8183
8184         if (extlpbk && tg3_phy_set_extloopbk(tp))
8185                 return -EIO;
8186
8187         bmcr = BMCR_FULLDPLX;
8188         switch (speed) {
8189         case SPEED_10:
8190                 break;
8191         case SPEED_100:
8192                 bmcr |= BMCR_SPEED100;
8193                 break;
8194         case SPEED_1000:
8195         default:
8196                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8197                         speed = SPEED_100;
8198                         bmcr |= BMCR_SPEED100;
8199                 } else {
8200                         speed = SPEED_1000;
8201                         bmcr |= BMCR_SPEED1000;
8202                 }
8203         }
8204
8205         if (extlpbk) {
8206                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8207                         tg3_readphy(tp, MII_CTRL1000, &val);
8208                         val |= CTL1000_AS_MASTER |
8209                                CTL1000_ENABLE_MASTER;
8210                         tg3_writephy(tp, MII_CTRL1000, val);
8211                 } else {
8212                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8213                                 MII_TG3_FET_PTEST_TRIM_2;
8214                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8215                 }
8216         } else
8217                 bmcr |= BMCR_LOOPBACK;
8218
8219         tg3_writephy(tp, MII_BMCR, bmcr);
8220
8221         /* The write needs to be flushed for the FETs */
8222         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8223                 tg3_readphy(tp, MII_BMCR, &bmcr);
8224
8225         udelay(40);
8226
8227         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8228             tg3_asic_rev(tp) == ASIC_REV_5785) {
8229                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8230                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8231                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8232
8233                 /* The write needs to be flushed for the AC131 */
8234                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8235         }
8236
8237         /* Reset to prevent intermittently losing the 1st rx packet */
8238         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8239             tg3_flag(tp, 5780_CLASS)) {
8240                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8241                 udelay(10);
8242                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8243         }
8244
8245         mac_mode = tp->mac_mode &
8246                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8247         if (speed == SPEED_1000)
8248                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8249         else
8250                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8251
8252         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8253                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8254
8255                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8256                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8257                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8258                         mac_mode |= MAC_MODE_LINK_POLARITY;
8259
8260                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8261                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8262         }
8263
8264         tw32(MAC_MODE, mac_mode);
8265         udelay(40);
8266
8267         return 0;
8268 }
8269
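     /* NETIF_F_LOOPBACK is toggled from userspace, e.g. (illustrative)
      * "ethtool -K eth0 loopback on", and reaches this function through
      * tg3_set_features() below.
      */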
8270 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8271 {
8272         struct tg3 *tp = netdev_priv(dev);
8273
8274         if (features & NETIF_F_LOOPBACK) {
8275                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8276                         return;
8277
8278                 spin_lock_bh(&tp->lock);
8279                 tg3_mac_loopback(tp, true);
8280                 netif_carrier_on(tp->dev);
8281                 spin_unlock_bh(&tp->lock);
8282                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8283         } else {
8284                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8285                         return;
8286
8287                 spin_lock_bh(&tp->lock);
8288                 tg3_mac_loopback(tp, false);
8289                 /* Force link status check */
8290                 tg3_setup_phy(tp, true);
8291                 spin_unlock_bh(&tp->lock);
8292                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8293         }
8294 }
8295
8296 static netdev_features_t tg3_fix_features(struct net_device *dev,
8297         netdev_features_t features)
8298 {
8299         struct tg3 *tp = netdev_priv(dev);
8300
8301         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8302                 features &= ~NETIF_F_ALL_TSO;
8303
8304         return features;
8305 }
8306
8307 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8308 {
8309         netdev_features_t changed = dev->features ^ features;
8310
8311         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8312                 tg3_set_loopback(dev, features);
8313
8314         return 0;
8315 }
8316
8317 static void tg3_rx_prodring_free(struct tg3 *tp,
8318                                  struct tg3_rx_prodring_set *tpr)
8319 {
8320         int i;
8321
8322         if (tpr != &tp->napi[0].prodring) {
8323                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8324                      i = (i + 1) & tp->rx_std_ring_mask)
8325                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8326                                         tp->rx_pkt_map_sz);
8327
8328                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8329                         for (i = tpr->rx_jmb_cons_idx;
8330                              i != tpr->rx_jmb_prod_idx;
8331                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8332                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8333                                                 TG3_RX_JMB_MAP_SZ);
8334                         }
8335                 }
8336
8337                 return;
8338         }
8339
8340         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8341                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8342                                 tp->rx_pkt_map_sz);
8343
8344         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8345                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8346                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8347                                         TG3_RX_JMB_MAP_SZ);
8348         }
8349 }
8350
8351 /* Initialize rx rings for packet processing.
8352  *
8353  * The chip has been shut down and the driver detached from
8354  * the networking core, so no interrupts or new tx packets will
8355  * end up in the driver.  tp->{tx,}lock are held and thus
8356  * we may not sleep.
8357  */
8358 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8359                                  struct tg3_rx_prodring_set *tpr)
8360 {
8361         u32 i, rx_pkt_dma_sz;
8362
8363         tpr->rx_std_cons_idx = 0;
8364         tpr->rx_std_prod_idx = 0;
8365         tpr->rx_jmb_cons_idx = 0;
8366         tpr->rx_jmb_prod_idx = 0;
8367
8368         if (tpr != &tp->napi[0].prodring) {
8369                 memset(&tpr->rx_std_buffers[0], 0,
8370                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8371                 if (tpr->rx_jmb_buffers)
8372                         memset(&tpr->rx_jmb_buffers[0], 0,
8373                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8374                 goto done;
8375         }
8376
8377         /* Zero out all descriptors. */
8378         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8379
8380         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8381         if (tg3_flag(tp, 5780_CLASS) &&
8382             tp->dev->mtu > ETH_DATA_LEN)
8383                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8384         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8385
8386         /* Initialize invariants of the rings; we only set this
8387          * stuff once.  This works because the card does not
8388          * write into the rx buffer posting rings.
8389          */
8390         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8391                 struct tg3_rx_buffer_desc *rxd;
8392
8393                 rxd = &tpr->rx_std[i];
8394                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8395                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8396                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8397                                (i << RXD_OPAQUE_INDEX_SHIFT));
8398         }
8399
8400         /* Now allocate fresh SKBs for each rx ring. */
8401         for (i = 0; i < tp->rx_pending; i++) {
8402                 unsigned int frag_size;
8403
8404                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8405                                       &frag_size) < 0) {
8406                         netdev_warn(tp->dev,
8407                                     "Using a smaller RX standard ring. Only "
8408                                     "%d out of %d buffers were allocated "
8409                                     "successfully\n", i, tp->rx_pending);
8410                         if (i == 0)
8411                                 goto initfail;
8412                         tp->rx_pending = i;
8413                         break;
8414                 }
8415         }
8416
8417         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8418                 goto done;
8419
8420         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8421
8422         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8423                 goto done;
8424
8425         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8426                 struct tg3_rx_buffer_desc *rxd;
8427
8428                 rxd = &tpr->rx_jmb[i].std;
8429                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8430                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8431                                   RXD_FLAG_JUMBO;
8432                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8433                        (i << RXD_OPAQUE_INDEX_SHIFT));
8434         }
8435
8436         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8437                 unsigned int frag_size;
8438
8439                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8440                                       &frag_size) < 0) {
8441                         netdev_warn(tp->dev,
8442                                     "Using a smaller RX jumbo ring. Only %d "
8443                                     "out of %d buffers were allocated "
8444                                     "successfully\n", i, tp->rx_jumbo_pending);
8445                         if (i == 0)
8446                                 goto initfail;
8447                         tp->rx_jumbo_pending = i;
8448                         break;
8449                 }
8450         }
8451
8452 done:
8453         return 0;
8454
8455 initfail:
8456         tg3_rx_prodring_free(tp, tpr);
8457         return -ENOMEM;
8458 }
8459
8460 static void tg3_rx_prodring_fini(struct tg3 *tp,
8461                                  struct tg3_rx_prodring_set *tpr)
8462 {
8463         kfree(tpr->rx_std_buffers);
8464         tpr->rx_std_buffers = NULL;
8465         kfree(tpr->rx_jmb_buffers);
8466         tpr->rx_jmb_buffers = NULL;
8467         if (tpr->rx_std) {
8468                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8469                                   tpr->rx_std, tpr->rx_std_mapping);
8470                 tpr->rx_std = NULL;
8471         }
8472         if (tpr->rx_jmb) {
8473                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8474                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8475                 tpr->rx_jmb = NULL;
8476         }
8477 }
8478
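     /* Allocate the host-side bookkeeping arrays and the DMA-coherent
      * descriptor rings for one producer set.  Jumbo resources are
      * allocated only on chips that implement a separate jumbo ring.
      */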
8479 static int tg3_rx_prodring_init(struct tg3 *tp,
8480                                 struct tg3_rx_prodring_set *tpr)
8481 {
8482         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8483                                       GFP_KERNEL);
8484         if (!tpr->rx_std_buffers)
8485                 return -ENOMEM;
8486
8487         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8488                                          TG3_RX_STD_RING_BYTES(tp),
8489                                          &tpr->rx_std_mapping,
8490                                          GFP_KERNEL);
8491         if (!tpr->rx_std)
8492                 goto err_out;
8493
8494         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8495                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8496                                               GFP_KERNEL);
8497                 if (!tpr->rx_jmb_buffers)
8498                         goto err_out;
8499
8500                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8501                                                  TG3_RX_JMB_RING_BYTES(tp),
8502                                                  &tpr->rx_jmb_mapping,
8503                                                  GFP_KERNEL);
8504                 if (!tpr->rx_jmb)
8505                         goto err_out;
8506         }
8507
8508         return 0;
8509
8510 err_out:
8511         tg3_rx_prodring_fini(tp, tpr);
8512         return -ENOMEM;
8513 }
8514
8515 /* Free up pending packets in all rx/tx rings.
8516  *
8517  * The chip has been shut down and the driver detached from
8518  * the networking core, so no interrupts or new tx packets will
8519  * end up in the driver.  tp->{tx,}lock is not held and we are not
8520  * in an interrupt context and thus may sleep.
8521  */
8522 static void tg3_free_rings(struct tg3 *tp)
8523 {
8524         int i, j;
8525
8526         for (j = 0; j < tp->irq_cnt; j++) {
8527                 struct tg3_napi *tnapi = &tp->napi[j];
8528
8529                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8530
8531                 if (!tnapi->tx_buffers)
8532                         continue;
8533
8534                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8535                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8536
8537                         if (!skb)
8538                                 continue;
8539
8540                         tg3_tx_skb_unmap(tnapi, i,
8541                                          skb_shinfo(skb)->nr_frags - 1);
8542
8543                         dev_kfree_skb_any(skb);
8544                 }
8545                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8546         }
8547 }
8548
8549 /* Initialize tx/rx rings for packet processing.
8550  *
8551  * The chip has been shut down and the driver detached from
8552  * the networking stack, so no interrupts or new tx packets will
8553  * end up in the driver.  tp->{tx,}lock is held and thus
8554  * we may not sleep.
8555  */
8556 static int tg3_init_rings(struct tg3 *tp)
8557 {
8558         int i;
8559
8560         /* Free up all the SKBs. */
8561         tg3_free_rings(tp);
8562
8563         for (i = 0; i < tp->irq_cnt; i++) {
8564                 struct tg3_napi *tnapi = &tp->napi[i];
8565
8566                 tnapi->last_tag = 0;
8567                 tnapi->last_irq_tag = 0;
8568                 tnapi->hw_status->status = 0;
8569                 tnapi->hw_status->status_tag = 0;
8570                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8571
8572                 tnapi->tx_prod = 0;
8573                 tnapi->tx_cons = 0;
8574                 if (tnapi->tx_ring)
8575                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8576
8577                 tnapi->rx_rcb_ptr = 0;
8578                 if (tnapi->rx_rcb)
8579                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8580
8581                 if (tnapi->prodring.rx_std &&
8582                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8583                         tg3_free_rings(tp);
8584                         return -ENOMEM;
8585                 }
8586         }
8587
8588         return 0;
8589 }
8590
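/* Free the TX descriptor rings and ring-info arrays for every
 * interrupt vector that has them.
 */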
8591 static void tg3_mem_tx_release(struct tg3 *tp)
8592 {
8593         int i;
8594
8595         for (i = 0; i < tp->irq_max; i++) {
8596                 struct tg3_napi *tnapi = &tp->napi[i];
8597
8598                 if (tnapi->tx_ring) {
8599                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8600                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8601                         tnapi->tx_ring = NULL;
8602                 }
8603
8604                 kfree(tnapi->tx_buffers);
8605                 tnapi->tx_buffers = NULL;
8606         }
8607 }
8608
8609 static int tg3_mem_tx_acquire(struct tg3 *tp)
8610 {
8611         int i;
8612         struct tg3_napi *tnapi = &tp->napi[0];
8613
8614         /* If multivector TSS is enabled, vector 0 does not handle
8615          * tx interrupts.  Don't allocate any resources for it.
8616          */
8617         if (tg3_flag(tp, ENABLE_TSS))
8618                 tnapi++;
8619
8620         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8621                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8622                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8623                 if (!tnapi->tx_buffers)
8624                         goto err_out;
8625
8626                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8627                                                     TG3_TX_RING_BYTES,
8628                                                     &tnapi->tx_desc_mapping,
8629                                                     GFP_KERNEL);
8630                 if (!tnapi->tx_ring)
8631                         goto err_out;
8632         }
8633
8634         return 0;
8635
8636 err_out:
8637         tg3_mem_tx_release(tp);
8638         return -ENOMEM;
8639 }
8640
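/* Free the producer ring sets and RX return rings for every
 * interrupt vector.
 */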
8641 static void tg3_mem_rx_release(struct tg3 *tp)
8642 {
8643         int i;
8644
8645         for (i = 0; i < tp->irq_max; i++) {
8646                 struct tg3_napi *tnapi = &tp->napi[i];
8647
8648                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8649
8650                 if (!tnapi->rx_rcb)
8651                         continue;
8652
8653                 dma_free_coherent(&tp->pdev->dev,
8654                                   TG3_RX_RCB_RING_BYTES(tp),
8655                                   tnapi->rx_rcb,
8656                                   tnapi->rx_rcb_mapping);
8657                 tnapi->rx_rcb = NULL;
8658         }
8659 }
8660
8661 static int tg3_mem_rx_acquire(struct tg3 *tp)
8662 {
8663         unsigned int i, limit;
8664
8665         limit = tp->rxq_cnt;
8666
8667         /* If RSS is enabled, we need a (dummy) producer ring
8668          * set on vector zero.  This is the true hw prodring.
8669          */
8670         if (tg3_flag(tp, ENABLE_RSS))
8671                 limit++;
8672
8673         for (i = 0; i < limit; i++) {
8674                 struct tg3_napi *tnapi = &tp->napi[i];
8675
8676                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8677                         goto err_out;
8678
8679                 /* If multivector RSS is enabled, vector 0
8680                  * does not handle rx or tx interrupts.
8681                  * Don't allocate any resources for it.
8682                  */
8683                 if (!i && tg3_flag(tp, ENABLE_RSS))
8684                         continue;
8685
8686                 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8687                                                     TG3_RX_RCB_RING_BYTES(tp),
8688                                                     &tnapi->rx_rcb_mapping,
8689                                                     GFP_KERNEL);
8690                 if (!tnapi->rx_rcb)
8691                         goto err_out;
8692         }
8693
8694         return 0;
8695
8696 err_out:
8697         tg3_mem_rx_release(tp);
8698         return -ENOMEM;
8699 }
8700
8701 /*
8702  * Must only be invoked once interrupt sources have been
8703  * disabled and the hardware has been shut down.
8704  */
8705 static void tg3_free_consistent(struct tg3 *tp)
8706 {
8707         int i;
8708
8709         for (i = 0; i < tp->irq_cnt; i++) {
8710                 struct tg3_napi *tnapi = &tp->napi[i];
8711
8712                 if (tnapi->hw_status) {
8713                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8714                                           tnapi->hw_status,
8715                                           tnapi->status_mapping);
8716                         tnapi->hw_status = NULL;
8717                 }
8718         }
8719
8720         tg3_mem_rx_release(tp);
8721         tg3_mem_tx_release(tp);
8722
8723         /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
8724         tg3_full_lock(tp, 0);
8725         if (tp->hw_stats) {
8726                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8727                                   tp->hw_stats, tp->stats_mapping);
8728                 tp->hw_stats = NULL;
8729         }
8730         tg3_full_unlock(tp);
8731 }
8732
8733 /*
8734  * Must only be invoked once interrupt sources have been
8735  * disabled and the hardware has been shut down.  Can sleep.
8736  */
8737 static int tg3_alloc_consistent(struct tg3 *tp)
8738 {
8739         int i;
8740
8741         tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8742                                            sizeof(struct tg3_hw_stats),
8743                                            &tp->stats_mapping, GFP_KERNEL);
8744         if (!tp->hw_stats)
8745                 goto err_out;
8746
8747         for (i = 0; i < tp->irq_cnt; i++) {
8748                 struct tg3_napi *tnapi = &tp->napi[i];
8749                 struct tg3_hw_status *sblk;
8750
8751                 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8752                                                        TG3_HW_STATUS_SIZE,
8753                                                        &tnapi->status_mapping,
8754                                                        GFP_KERNEL);
8755                 if (!tnapi->hw_status)
8756                         goto err_out;
8757
8758                 sblk = tnapi->hw_status;
8759
8760                 if (tg3_flag(tp, ENABLE_RSS)) {
8761                         u16 *prodptr = NULL;
8762
8763                         /*
8764                          * When RSS is enabled, the status block format changes
8765                          * slightly.  The "rx_jumbo_consumer", "reserved",
8766                          * and "rx_mini_consumer" members get mapped to the
8767                          * other three rx return ring producer indexes.
8768                          */
8769                         switch (i) {
8770                         case 1:
8771                                 prodptr = &sblk->idx[0].rx_producer;
8772                                 break;
8773                         case 2:
8774                                 prodptr = &sblk->rx_jumbo_consumer;
8775                                 break;
8776                         case 3:
8777                                 prodptr = &sblk->reserved;
8778                                 break;
8779                         case 4:
8780                                 prodptr = &sblk->rx_mini_consumer;
8781                                 break;
8782                         }
8783                         tnapi->rx_rcb_prod_idx = prodptr;
8784                 } else {
8785                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8786                 }
8787         }
8788
8789         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8790                 goto err_out;
8791
8792         return 0;
8793
8794 err_out:
8795         tg3_free_consistent(tp);
8796         return -ENOMEM;
8797 }
8798
8799 #define MAX_WAIT_CNT 1000
8800
8801 /* To stop a block, clear the enable bit and poll until it
8802  * clears.  tp->lock is held.
8803  */
8804 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8805 {
8806         unsigned int i;
8807         u32 val;
8808
8809         if (tg3_flag(tp, 5705_PLUS)) {
8810                 switch (ofs) {
8811                 case RCVLSC_MODE:
8812                 case DMAC_MODE:
8813                 case MBFREE_MODE:
8814                 case BUFMGR_MODE:
8815                 case MEMARB_MODE:
8816                         /* We can't enable/disable these bits on the
8817                          * 5705/5750, so just report success.
8818                          */
8819                         return 0;
8820
8821                 default:
8822                         break;
8823                 }
8824         }
8825
8826         val = tr32(ofs);
8827         val &= ~enable_bit;
8828         tw32_f(ofs, val);
8829
8830         for (i = 0; i < MAX_WAIT_CNT; i++) {
8831                 if (pci_channel_offline(tp->pdev)) {
8832                         dev_err(&tp->pdev->dev,
8833                                 "tg3_stop_block device offline, "
8834                                 "ofs=%lx enable_bit=%x\n",
8835                                 ofs, enable_bit);
8836                         return -ENODEV;
8837                 }
8838
8839                 udelay(100);
8840                 val = tr32(ofs);
8841                 if ((val & enable_bit) == 0)
8842                         break;
8843         }
8844
8845         if (i == MAX_WAIT_CNT && !silent) {
8846                 dev_err(&tp->pdev->dev,
8847                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8848                         ofs, enable_bit);
8849                 return -ENODEV;
8850         }
8851
8852         return 0;
8853 }
8854
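/* Quiesce the chip: disable interrupts, stop each RX and TX state
 * machine block in dependency order, reset the FTQ, and clear the
 * per-vector status blocks.
 */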
8855 /* tp->lock is held. */
8856 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8857 {
8858         int i, err;
8859
8860         tg3_disable_ints(tp);
8861
8862         if (pci_channel_offline(tp->pdev)) {
8863                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8864                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8865                 err = -ENODEV;
8866                 goto err_no_dev;
8867         }
8868
8869         tp->rx_mode &= ~RX_MODE_ENABLE;
8870         tw32_f(MAC_RX_MODE, tp->rx_mode);
8871         udelay(10);
8872
8873         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8874         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8875         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8876         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8877         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8878         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8879
8880         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8881         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8882         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8883         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8884         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8885         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8886         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8887
8888         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8889         tw32_f(MAC_MODE, tp->mac_mode);
8890         udelay(40);
8891
8892         tp->tx_mode &= ~TX_MODE_ENABLE;
8893         tw32_f(MAC_TX_MODE, tp->tx_mode);
8894
8895         for (i = 0; i < MAX_WAIT_CNT; i++) {
8896                 udelay(100);
8897                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8898                         break;
8899         }
8900         if (i >= MAX_WAIT_CNT) {
8901                 dev_err(&tp->pdev->dev,
8902                         "%s timed out, TX_MODE_ENABLE will not clear "
8903                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8904                 err |= -ENODEV;
8905         }
8906
8907         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8908         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8909         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8910
8911         tw32(FTQ_RESET, 0xffffffff);
8912         tw32(FTQ_RESET, 0x00000000);
8913
8914         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8915         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8916
8917 err_no_dev:
8918         for (i = 0; i < tp->irq_cnt; i++) {
8919                 struct tg3_napi *tnapi = &tp->napi[i];
8920                 if (tnapi->hw_status)
8921                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8922         }
8923
8924         return err;
8925 }
8926
8927 /* Save PCI command register before chip reset */
8928 static void tg3_save_pci_state(struct tg3 *tp)
8929 {
8930         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8931 }
8932
8933 /* Restore PCI state after chip reset */
8934 static void tg3_restore_pci_state(struct tg3 *tp)
8935 {
8936         u32 val;
8937
8938         /* Re-enable indirect register accesses. */
8939         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8940                                tp->misc_host_ctrl);
8941
8942         /* Set MAX PCI retry to zero. */
8943         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8944         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8945             tg3_flag(tp, PCIX_MODE))
8946                 val |= PCISTATE_RETRY_SAME_DMA;
8947         /* Allow reads and writes to the APE register and memory space. */
8948         if (tg3_flag(tp, ENABLE_APE))
8949                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8950                        PCISTATE_ALLOW_APE_SHMEM_WR |
8951                        PCISTATE_ALLOW_APE_PSPACE_WR;
8952         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8953
8954         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8955
8956         if (!tg3_flag(tp, PCI_EXPRESS)) {
8957                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8958                                       tp->pci_cacheline_sz);
8959                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8960                                       tp->pci_lat_timer);
8961         }
8962
8963         /* Make sure PCI-X relaxed ordering bit is clear. */
8964         if (tg3_flag(tp, PCIX_MODE)) {
8965                 u16 pcix_cmd;
8966
8967                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8968                                      &pcix_cmd);
8969                 pcix_cmd &= ~PCI_X_CMD_ERO;
8970                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8971                                       pcix_cmd);
8972         }
8973
8974         if (tg3_flag(tp, 5780_CLASS)) {
8975
8976                 /* Chip reset on 5780 will reset the MSI enable bit,
8977                  * so we need to restore it.
8978                  */
8979                 if (tg3_flag(tp, USING_MSI)) {
8980                         u16 ctrl;
8981
8982                         pci_read_config_word(tp->pdev,
8983                                              tp->msi_cap + PCI_MSI_FLAGS,
8984                                              &ctrl);
8985                         pci_write_config_word(tp->pdev,
8986                                               tp->msi_cap + PCI_MSI_FLAGS,
8987                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8988                         val = tr32(MSGINT_MODE);
8989                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8990                 }
8991         }
8992 }
8993
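/* Assert the CPMU MAC clock override on 5717/5719/5720 class chips so
 * the bootcode is not slowed by link-aware clocking during a reset;
 * paired with tg3_restore_clk() below.
 */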
8994 static void tg3_override_clk(struct tg3 *tp)
8995 {
8996         u32 val;
8997
8998         switch (tg3_asic_rev(tp)) {
8999         case ASIC_REV_5717:
9000                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9001                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9002                      TG3_CPMU_MAC_ORIDE_ENABLE);
9003                 break;
9004
9005         case ASIC_REV_5719:
9006         case ASIC_REV_5720:
9007                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9008                 break;
9009
9010         default:
9011                 return;
9012         }
9013 }
9014
9015 static void tg3_restore_clk(struct tg3 *tp)
9016 {
9017         u32 val;
9018
9019         switch (tg3_asic_rev(tp)) {
9020         case ASIC_REV_5717:
9021                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9022                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9023                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9024                 break;
9025
9026         case ASIC_REV_5719:
9027         case ASIC_REV_5720:
9028                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9029                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9030                 break;
9031
9032         default:
9033                 return;
9034         }
9035 }
9036
9037 /* tp->lock is held. */
9038 static int tg3_chip_reset(struct tg3 *tp)
9039         __releases(tp->lock)
9040         __acquires(tp->lock)
9041 {
9042         u32 val;
9043         void (*write_op)(struct tg3 *, u32, u32);
9044         int i, err;
9045
9046         if (!pci_device_is_present(tp->pdev))
9047                 return -ENODEV;
9048
9049         tg3_nvram_lock(tp);
9050
9051         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9052
9053         /* No matching tg3_nvram_unlock() after this because
9054          * chip reset below will undo the nvram lock.
9055          */
9056         tp->nvram_lock_cnt = 0;
9057
9058         /* GRC_MISC_CFG core clock reset will clear the memory
9059          * enable bit in PCI register 4 and the MSI enable bit
9060          * on some chips, so we save relevant registers here.
9061          */
9062         tg3_save_pci_state(tp);
9063
9064         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9065             tg3_flag(tp, 5755_PLUS))
9066                 tw32(GRC_FASTBOOT_PC, 0);
9067
9068         /*
9069          * We must avoid the readl() that normally takes place.
9070          * It locks machines, causes machine checks, and other
9071          * fun things.  So, temporarily disable the 5701
9072          * hardware workaround, while we do the reset.
9073          */
9074         write_op = tp->write32;
9075         if (write_op == tg3_write_flush_reg32)
9076                 tp->write32 = tg3_write32;
9077
9078         /* Prevent the irq handler from reading or writing PCI registers
9079          * during chip reset when the memory enable bit in the PCI command
9080          * register may be cleared.  The chip does not generate interrupts
9081          * at this time, but the irq handler may still be called due to irq
9082          * sharing or irqpoll.
9083          */
9084         tg3_flag_set(tp, CHIP_RESETTING);
9085         for (i = 0; i < tp->irq_cnt; i++) {
9086                 struct tg3_napi *tnapi = &tp->napi[i];
9087                 if (tnapi->hw_status) {
9088                         tnapi->hw_status->status = 0;
9089                         tnapi->hw_status->status_tag = 0;
9090                 }
9091                 tnapi->last_tag = 0;
9092                 tnapi->last_irq_tag = 0;
9093         }
9094         smp_mb();
9095
9096         tg3_full_unlock(tp);
9097
9098         for (i = 0; i < tp->irq_cnt; i++)
9099                 synchronize_irq(tp->napi[i].irq_vec);
9100
9101         tg3_full_lock(tp, 0);
9102
9103         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9104                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9105                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9106         }
9107
9108         /* do the reset */
9109         val = GRC_MISC_CFG_CORECLK_RESET;
9110
9111         if (tg3_flag(tp, PCI_EXPRESS)) {
9112                 /* Force PCIe 1.0a mode */
9113                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9114                     !tg3_flag(tp, 57765_PLUS) &&
9115                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9116                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9117                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9118
9119                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9120                         tw32(GRC_MISC_CFG, (1 << 29));
9121                         val |= (1 << 29);
9122                 }
9123         }
9124
9125         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9126                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9127                 tw32(GRC_VCPU_EXT_CTRL,
9128                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9129         }
9130
9131         /* Set the clock to the highest frequency to avoid timeouts. In
9132          * link-aware mode, the clock speed could be slow and the bootcode
9133          * might not complete within the expected time. Override the clock
9134          * to let the bootcode finish sooner, then restore it afterwards.
9135          */
9136         tg3_override_clk(tp);
9137
9138         /* Manage gphy power for all CPMU-absent PCIe devices. */
9139         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9140                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9141
9142         tw32(GRC_MISC_CFG, val);
9143
9144         /* restore 5701 hardware bug workaround write method */
9145         tp->write32 = write_op;
9146
9147         /* Unfortunately, we have to delay before the PCI read back.
9148          * Some 575X chips even will not respond to a PCI cfg access
9149          * when the reset command is given to the chip.
9150          *
9151          * How do these hardware designers expect things to work
9152          * properly if the PCI write is posted for a long period
9153          * of time?  It is always necessary to have some method by
9154          * which a register read back can occur to push the write
9155          * out which does the reset.
9156          *
9157          * For most tg3 variants the trick below was working.
9158          * Ho hum...
9159          */
9160         udelay(120);
9161
9162         /* Flush PCI posted writes.  The normal MMIO registers
9163  * are inaccessible at this time, so this is the only
9164  * way to do this reliably (actually, this is no longer
9165          * the case, see above).  I tried to use indirect
9166          * register read/write but this upset some 5701 variants.
9167          */
9168         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9169
9170         udelay(120);
9171
9172         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9173                 u16 val16;
9174
9175                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9176                         int j;
9177                         u32 cfg_val;
9178
9179                         /* Wait for link training to complete.  */
9180                         for (j = 0; j < 5000; j++)
9181                                 udelay(100);
9182
9183                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9184                         pci_write_config_dword(tp->pdev, 0xc4,
9185                                                cfg_val | (1 << 15));
9186                 }
9187
9188                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9189                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9190                 /*
9191                  * Older PCIe devices only support the 128 byte
9192                  * MPS setting.  Enforce the restriction.
9193                  */
9194                 if (!tg3_flag(tp, CPMU_PRESENT))
9195                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9196                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9197
9198                 /* Clear error status */
9199                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9200                                       PCI_EXP_DEVSTA_CED |
9201                                       PCI_EXP_DEVSTA_NFED |
9202                                       PCI_EXP_DEVSTA_FED |
9203                                       PCI_EXP_DEVSTA_URD);
9204         }
9205
9206         tg3_restore_pci_state(tp);
9207
9208         tg3_flag_clear(tp, CHIP_RESETTING);
9209         tg3_flag_clear(tp, ERROR_PROCESSED);
9210
9211         val = 0;
9212         if (tg3_flag(tp, 5780_CLASS))
9213                 val = tr32(MEMARB_MODE);
9214         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9215
9216         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9217                 tg3_stop_fw(tp);
9218                 tw32(0x5000, 0x400);
9219         }
9220
9221         if (tg3_flag(tp, IS_SSB_CORE)) {
9222                 /*
9223                  * BCM4785: In order to avoid repercussions from using the
9224                  * potentially defective internal ROM, stop the Rx RISC CPU,
9225                  * which is not required for normal operation.
9226                  */
9227                 tg3_stop_fw(tp);
9228                 tg3_halt_cpu(tp, RX_CPU_BASE);
9229         }
9230
9231         err = tg3_poll_fw(tp);
9232         if (err)
9233                 return err;
9234
9235         tw32(GRC_MODE, tp->grc_mode);
9236
9237         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9238                 val = tr32(0xc4);
9239
9240                 tw32(0xc4, val | (1 << 15));
9241         }
9242
9243         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9244             tg3_asic_rev(tp) == ASIC_REV_5705) {
9245                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9246                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9247                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9248                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9249         }
9250
9251         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9252                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9253                 val = tp->mac_mode;
9254         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9255                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9256                 val = tp->mac_mode;
9257         } else
9258                 val = 0;
9259
9260         tw32_f(MAC_MODE, val);
9261         udelay(40);
9262
9263         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9264
9265         tg3_mdio_start(tp);
9266
9267         if (tg3_flag(tp, PCI_EXPRESS) &&
9268             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9269             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9270             !tg3_flag(tp, 57765_PLUS)) {
9271                 val = tr32(0x7c00);
9272
9273                 tw32(0x7c00, val | (1 << 25));
9274         }
9275
9276         tg3_restore_clk(tp);
9277
9278         /* Reprobe ASF enable state.  */
9279         tg3_flag_clear(tp, ENABLE_ASF);
9280         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9281                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9282
9283         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9284         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9285         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9286                 u32 nic_cfg;
9287
9288                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9289                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9290                         tg3_flag_set(tp, ENABLE_ASF);
9291                         tp->last_event_jiffies = jiffies;
9292                         if (tg3_flag(tp, 5750_PLUS))
9293                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9294
9295                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9296                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9297                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9298                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9299                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9300                 }
9301         }
9302
9303         return 0;
9304 }
9305
9306 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9307 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9308 static void __tg3_set_rx_mode(struct net_device *);
9309
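/* Fully halt the chip: stop the firmware interface, reset the chip,
 * restore the MAC address, and carry the statistics across the reset.
 */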
9310 /* tp->lock is held. */
9311 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9312 {
9313         int err;
9314
9315         tg3_stop_fw(tp);
9316
9317         tg3_write_sig_pre_reset(tp, kind);
9318
9319         tg3_abort_hw(tp, silent);
9320         err = tg3_chip_reset(tp);
9321
9322         __tg3_set_mac_addr(tp, false);
9323
9324         tg3_write_sig_legacy(tp, kind);
9325         tg3_write_sig_post_reset(tp, kind);
9326
9327         if (tp->hw_stats) {
9328                 /* Save the stats across chip resets... */
9329                 tg3_get_nstats(tp, &tp->net_stats_prev);
9330                 tg3_get_estats(tp, &tp->estats_prev);
9331
9332                 /* And make sure the next sample is new data */
9333                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9334         }
9335
9336         return err;
9337 }
9338
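/* Set a new hardware address: validate and store it, then, if the
 * device is running, program the MAC address registers while leaving
 * MAC address slot 1 alone when ASF firmware has claimed it.
 */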
9339 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9340 {
9341         struct tg3 *tp = netdev_priv(dev);
9342         struct sockaddr *addr = p;
9343         int err = 0;
9344         bool skip_mac_1 = false;
9345
9346         if (!is_valid_ether_addr(addr->sa_data))
9347                 return -EADDRNOTAVAIL;
9348
9349         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9350
9351         if (!netif_running(dev))
9352                 return 0;
9353
9354         if (tg3_flag(tp, ENABLE_ASF)) {
9355                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9356
9357                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9358                 addr0_low = tr32(MAC_ADDR_0_LOW);
9359                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9360                 addr1_low = tr32(MAC_ADDR_1_LOW);
9361
9362                 /* Skip MAC addr 1 if ASF is using it. */
9363                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9364                     !(addr1_high == 0 && addr1_low == 0))
9365                         skip_mac_1 = true;
9366         }
9367         spin_lock_bh(&tp->lock);
9368         __tg3_set_mac_addr(tp, skip_mac_1);
9369         __tg3_set_rx_mode(dev);
9370         spin_unlock_bh(&tp->lock);
9371
9372         return err;
9373 }
9374
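/* Write one ring's bdinfo block into NIC SRAM: the 64-bit host DMA
 * address, the maxlen/flags word and, on pre-5705 chips, the
 * NIC-local ring address.
 */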
9375 /* tp->lock is held. */
9376 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9377                            dma_addr_t mapping, u32 maxlen_flags,
9378                            u32 nic_addr)
9379 {
9380         tg3_write_mem(tp,
9381                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9382                       ((u64) mapping >> 32));
9383         tg3_write_mem(tp,
9384                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9385                       ((u64) mapping & 0xffffffff));
9386         tg3_write_mem(tp,
9387                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9388                        maxlen_flags);
9389
9390         if (!tg3_flag(tp, 5705_PLUS))
9391                 tg3_write_mem(tp,
9392                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9393                               nic_addr);
9394 }
9395
9396
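/* Apply the ethtool TX coalescing parameters.  Without TSS only the
 * default HOSTCC registers are used; with TSS each TX queue gets its
 * own per-vector register block (stride 0x18).  Unused vectors are
 * zeroed.
 */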
9397 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9398 {
9399         int i = 0;
9400
9401         if (!tg3_flag(tp, ENABLE_TSS)) {
9402                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9403                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9404                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9405         } else {
9406                 tw32(HOSTCC_TXCOL_TICKS, 0);
9407                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9408                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9409
9410                 for (; i < tp->txq_cnt; i++) {
9411                         u32 reg;
9412
9413                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9414                         tw32(reg, ec->tx_coalesce_usecs);
9415                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9416                         tw32(reg, ec->tx_max_coalesced_frames);
9417                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9418                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9419                 }
9420         }
9421
9422         for (; i < tp->irq_max - 1; i++) {
9423                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9424                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9425                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9426         }
9427 }
9428
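/* Apply the ethtool RX coalescing parameters to the default and the
 * per-vector HOSTCC registers; vectors beyond the RX queue count are
 * zeroed.
 */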
9429 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9430 {
9431         int i = 0;
9432         u32 limit = tp->rxq_cnt;
9433
9434         if (!tg3_flag(tp, ENABLE_RSS)) {
9435                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9436                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9437                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9438                 limit--;
9439         } else {
9440                 tw32(HOSTCC_RXCOL_TICKS, 0);
9441                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9442                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9443         }
9444
9445         for (; i < limit; i++) {
9446                 u32 reg;
9447
9448                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9449                 tw32(reg, ec->rx_coalesce_usecs);
9450                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9451                 tw32(reg, ec->rx_max_coalesced_frames);
9452                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9453                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9454         }
9455
9456         for (; i < tp->irq_max - 1; i++) {
9457                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9458                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9459                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9460         }
9461 }
9462
9463 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9464 {
9465         tg3_coal_tx_init(tp, ec);
9466         tg3_coal_rx_init(tp, ec);
9467
9468         if (!tg3_flag(tp, 5705_PLUS)) {
9469                 u32 val = ec->stats_block_coalesce_usecs;
9470
9471                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9472                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9473
9474                 if (!tp->link_up)
9475                         val = 0;
9476
9477                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9478         }
9479 }
9480
9481 /* tp->lock is held. */
9482 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9483 {
9484         u32 txrcb, limit;
9485
9486         /* Disable all transmit rings but the first. */
9487         if (!tg3_flag(tp, 5705_PLUS))
9488                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9489         else if (tg3_flag(tp, 5717_PLUS))
9490                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9491         else if (tg3_flag(tp, 57765_CLASS) ||
9492                  tg3_asic_rev(tp) == ASIC_REV_5762)
9493                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9494         else
9495                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9496
9497         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9498              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9499                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9500                               BDINFO_FLAGS_DISABLED);
9501 }
9502
9503 /* tp->lock is held. */
9504 static void tg3_tx_rcbs_init(struct tg3 *tp)
9505 {
9506         int i = 0;
9507         u32 txrcb = NIC_SRAM_SEND_RCB;
9508
9509         if (tg3_flag(tp, ENABLE_TSS))
9510                 i++;
9511
9512         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9513                 struct tg3_napi *tnapi = &tp->napi[i];
9514
9515                 if (!tnapi->tx_ring)
9516                         continue;
9517
9518                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9519                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9520                                NIC_SRAM_TX_BUFFER_DESC);
9521         }
9522 }
9523
9524 /* tp->lock is held. */
9525 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9526 {
9527         u32 rxrcb, limit;
9528
9529         /* Disable all receive return rings but the first. */
9530         if (tg3_flag(tp, 5717_PLUS))
9531                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9532         else if (!tg3_flag(tp, 5705_PLUS))
9533                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9534         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9535                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9536                  tg3_flag(tp, 57765_CLASS))
9537                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9538         else
9539                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9540
9541         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9542              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9543                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9544                               BDINFO_FLAGS_DISABLED);
9545 }
9546
9547 /* tp->lock is held. */
9548 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9549 {
9550         int i = 0;
9551         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9552
9553         if (tg3_flag(tp, ENABLE_RSS))
9554                 i++;
9555
9556         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9557                 struct tg3_napi *tnapi = &tp->napi[i];
9558
9559                 if (!tnapi->rx_rcb)
9560                         continue;
9561
9562                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9563                                (tp->rx_ret_ring_mask + 1) <<
9564                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9565         }
9566 }
9567
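/* Reset all of the chip's ring control blocks: disable the send and
 * receive return rings, quiesce the mailboxes, re-point the status
 * block DMA addresses, and re-initialize the bdinfo blocks for the
 * active rings.
 */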
9568 /* tp->lock is held. */
9569 static void tg3_rings_reset(struct tg3 *tp)
9570 {
9571         int i;
9572         u32 stblk;
9573         struct tg3_napi *tnapi = &tp->napi[0];
9574
9575         tg3_tx_rcbs_disable(tp);
9576
9577         tg3_rx_ret_rcbs_disable(tp);
9578
9579         /* Disable interrupts */
9580         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9581         tp->napi[0].chk_msi_cnt = 0;
9582         tp->napi[0].last_rx_cons = 0;
9583         tp->napi[0].last_tx_cons = 0;
9584
9585         /* Zero mailbox registers. */
9586         if (tg3_flag(tp, SUPPORT_MSIX)) {
9587                 for (i = 1; i < tp->irq_max; i++) {
9588                         tp->napi[i].tx_prod = 0;
9589                         tp->napi[i].tx_cons = 0;
9590                         if (tg3_flag(tp, ENABLE_TSS))
9591                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9592                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9593                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9594                         tp->napi[i].chk_msi_cnt = 0;
9595                         tp->napi[i].last_rx_cons = 0;
9596                         tp->napi[i].last_tx_cons = 0;
9597                 }
9598                 if (!tg3_flag(tp, ENABLE_TSS))
9599                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9600         } else {
9601                 tp->napi[0].tx_prod = 0;
9602                 tp->napi[0].tx_cons = 0;
9603                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9604                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9605         }
9606
9607         /* Make sure the NIC-based send BD rings are disabled. */
9608         if (!tg3_flag(tp, 5705_PLUS)) {
9609                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9610                 for (i = 0; i < 16; i++)
9611                         tw32_tx_mbox(mbox + i * 8, 0);
9612         }
9613
9614         /* Clear status block in ram. */
9615         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9616
9617         /* Set status block DMA address */
9618         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9619              ((u64) tnapi->status_mapping >> 32));
9620         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9621              ((u64) tnapi->status_mapping & 0xffffffff));
9622
9623         stblk = HOSTCC_STATBLCK_RING1;
9624
9625         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9626                 u64 mapping = (u64)tnapi->status_mapping;
9627                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9628                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9629                 stblk += 8;
9630
9631                 /* Clear status block in ram. */
9632                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9633         }
9634
9635         tg3_tx_rcbs_init(tp);
9636         tg3_rx_ret_rcbs_init(tp);
9637 }
9638
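/* Program the RX BD replenish thresholds: pick the watermark at which
 * the chip asks the host for more standard (and, where supported,
 * jumbo) receive buffers, bounded by the per-chip BD cache size.
 */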
9639 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9640 {
9641         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9642
9643         if (!tg3_flag(tp, 5750_PLUS) ||
9644             tg3_flag(tp, 5780_CLASS) ||
9645             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9646             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9647             tg3_flag(tp, 57765_PLUS))
9648                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9649         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9650                  tg3_asic_rev(tp) == ASIC_REV_5787)
9651                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9652         else
9653                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9654
9655         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9656         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9657
9658         val = min(nic_rep_thresh, host_rep_thresh);
9659         tw32(RCVBDI_STD_THRESH, val);
9660
9661         if (tg3_flag(tp, 57765_PLUS))
9662                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9663
9664         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9665                 return;
9666
9667         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9668
9669         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9670
9671         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9672         tw32(RCVBDI_JUMBO_THRESH, val);
9673
9674         if (tg3_flag(tp, 57765_PLUS))
9675                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9676 }
9677
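/* Bit-serial CRC-32 (Ethernet polynomial 0xedb88320, LSB first), used
 * to hash multicast addresses into the 128-bit MAC hash filter.
 */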
9678 static inline u32 calc_crc(unsigned char *buf, int len)
9679 {
9680         u32 reg;
9681         u32 tmp;
9682         int j, k;
9683
9684         reg = 0xffffffff;
9685
9686         for (j = 0; j < len; j++) {
9687                 reg ^= buf[j];
9688
9689                 for (k = 0; k < 8; k++) {
9690                         tmp = reg & 0x01;
9691
9692                         reg >>= 1;
9693
9694                         if (tmp)
9695                                 reg ^= 0xedb88320;
9696                 }
9697         }
9698
9699         return ~reg;
9700 }
9701
9702 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9703 {
9704         /* accept or reject all multicast frames */
9705         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9706         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9707         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9708         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9709 }
9710
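/* Program the RX filtering mode from dev->flags and the device's
 * multicast/unicast address lists; MAC_RX_MODE is only rewritten when
 * the computed mode differs from the current one.
 */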
9711 static void __tg3_set_rx_mode(struct net_device *dev)
9712 {
9713         struct tg3 *tp = netdev_priv(dev);
9714         u32 rx_mode;
9715
9716         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9717                                   RX_MODE_KEEP_VLAN_TAG);
9718
9719 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9720         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9721          * flag clear.
9722          */
9723         if (!tg3_flag(tp, ENABLE_ASF))
9724                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9725 #endif
9726
9727         if (dev->flags & IFF_PROMISC) {
9728                 /* Promiscuous mode. */
9729                 rx_mode |= RX_MODE_PROMISC;
9730         } else if (dev->flags & IFF_ALLMULTI) {
9731                 /* Accept all multicast. */
9732                 tg3_set_multi(tp, 1);
9733         } else if (netdev_mc_empty(dev)) {
9734                 /* Reject all multicast. */
9735                 tg3_set_multi(tp, 0);
9736         } else {
9737                 /* Accept one or more multicast(s). */
9738                 struct netdev_hw_addr *ha;
9739                 u32 mc_filter[4] = { 0, };
9740                 u32 regidx;
9741                 u32 bit;
9742                 u32 crc;
9743
9744                 netdev_for_each_mc_addr(ha, dev) {
9745                         crc = calc_crc(ha->addr, ETH_ALEN);
9746                         bit = ~crc & 0x7f;
9747                         regidx = (bit & 0x60) >> 5;
9748                         bit &= 0x1f;
9749                         mc_filter[regidx] |= (1 << bit);
9750                 }
9751
9752                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9753                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9754                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9755                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9756         }
9757
9758         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9759                 rx_mode |= RX_MODE_PROMISC;
9760         } else if (!(dev->flags & IFF_PROMISC)) {
9761                 /* Add all entries to the MAC address filter list */
9762                 int i = 0;
9763                 struct netdev_hw_addr *ha;
9764
9765                 netdev_for_each_uc_addr(ha, dev) {
9766                         __tg3_set_one_mac_addr(tp, ha->addr,
9767                                                i + TG3_UCAST_ADDR_IDX(tp));
9768                         i++;
9769                 }
9770         }
9771
9772         if (rx_mode != tp->rx_mode) {
9773                 tp->rx_mode = rx_mode;
9774                 tw32_f(MAC_RX_MODE, rx_mode);
9775                 udelay(10);
9776         }
9777 }
9778
9779 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9780 {
9781         int i;
9782
9783         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9784                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9785 }
9786
9787 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9788 {
9789         int i;
9790
9791         if (!tg3_flag(tp, SUPPORT_MSIX))
9792                 return;
9793
9794         if (tp->rxq_cnt == 1) {
9795                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9796                 return;
9797         }
9798
9799         /* Validate table against current IRQ count */
9800         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9801                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9802                         break;
9803         }
9804
9805         if (i != TG3_RSS_INDIR_TBL_SIZE)
9806                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9807 }
9808
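/* Write the RSS indirection table to the hardware, packing eight
 * 4-bit entries into each 32-bit register starting at
 * MAC_RSS_INDIR_TBL_0.
 */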
9809 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9810 {
9811         int i = 0;
9812         u32 reg = MAC_RSS_INDIR_TBL_0;
9813
9814         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9815                 u32 val = tp->rss_ind_tbl[i];
9816                 i++;
9817                 for (; i % 8; i++) {
9818                         val <<= 4;
9819                         val |= tp->rss_ind_tbl[i];
9820                 }
9821                 tw32(reg, val);
9822                 reg += 4;
9823         }
9824 }
9825
9826 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9827 {
9828         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9829                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9830         else
9831                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9832 }
9833
9834 /* tp->lock is held. */
9835 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9836 {
9837         u32 val, rdmac_mode;
9838         int i, err, limit;
9839         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9840
9841         tg3_disable_ints(tp);
9842
9843         tg3_stop_fw(tp);
9844
9845         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9846
9847         if (tg3_flag(tp, INIT_COMPLETE))
9848                 tg3_abort_hw(tp, 1);
9849
9850         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9851             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9852                 tg3_phy_pull_config(tp);
9853                 tg3_eee_pull_config(tp, NULL);
9854                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9855         }
9856
9857         /* Enable MAC control of LPI */
9858         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9859                 tg3_setup_eee(tp);
9860
9861         if (reset_phy)
9862                 tg3_phy_reset(tp);
9863
9864         err = tg3_chip_reset(tp);
9865         if (err)
9866                 return err;
9867
9868         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9869
9870         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9871                 val = tr32(TG3_CPMU_CTRL);
9872                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9873                 tw32(TG3_CPMU_CTRL, val);
9874
9875                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9876                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9877                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9878                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9879
9880                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9881                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9882                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9883                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9884
9885                 val = tr32(TG3_CPMU_HST_ACC);
9886                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9887                 val |= CPMU_HST_ACC_MACCLK_6_25;
9888                 tw32(TG3_CPMU_HST_ACC, val);
9889         }
9890
9891         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9892                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9893                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9894                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9895                 tw32(PCIE_PWR_MGMT_THRESH, val);
9896
9897                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9898                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9899
9900                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9901
9902                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9903                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9904         }
9905
9906         if (tg3_flag(tp, L1PLLPD_EN)) {
9907                 u32 grc_mode = tr32(GRC_MODE);
9908
9909                 /* Access the lower 1K of PL PCIE block registers. */
9910                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9911                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9912
9913                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9914                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9915                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9916
9917                 tw32(GRC_MODE, grc_mode);
9918         }
9919
9920         if (tg3_flag(tp, 57765_CLASS)) {
9921                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9922                         u32 grc_mode = tr32(GRC_MODE);
9923
9924                         /* Access the lower 1K of PL PCIE block registers. */
9925                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9926                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9927
9928                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9929                                    TG3_PCIE_PL_LO_PHYCTL5);
9930                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9931                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9932
9933                         tw32(GRC_MODE, grc_mode);
9934                 }
9935
9936                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9937                         u32 grc_mode;
9938
9939                         /* Fix transmit hangs */
9940                         val = tr32(TG3_CPMU_PADRNG_CTL);
9941                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9942                         tw32(TG3_CPMU_PADRNG_CTL, val);
9943
9944                         grc_mode = tr32(GRC_MODE);
9945
9946                         /* Access the lower 1K of DL PCIE block registers. */
9947                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9948                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9949
9950                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9951                                    TG3_PCIE_DL_LO_FTSMAX);
9952                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9953                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9954                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9955
9956                         tw32(GRC_MODE, grc_mode);
9957                 }
9958
9959                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9960                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9961                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9962                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9963         }
9964
9965         /* This works around an issue with Athlon chipsets on
9966          * B3 tigon3 silicon.  This bit has no effect on any
9967          * other revision.  But do not set this on PCI Express
9968          * chips and don't even touch the clocks if the CPMU is present.
9969          */
9970         if (!tg3_flag(tp, CPMU_PRESENT)) {
9971                 if (!tg3_flag(tp, PCI_EXPRESS))
9972                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9973                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9974         }
9975
9976         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9977             tg3_flag(tp, PCIX_MODE)) {
9978                 val = tr32(TG3PCI_PCISTATE);
9979                 val |= PCISTATE_RETRY_SAME_DMA;
9980                 tw32(TG3PCI_PCISTATE, val);
9981         }
9982
9983         if (tg3_flag(tp, ENABLE_APE)) {
9984                 /* Allow reads and writes to the
9985                  * APE register and memory space.
9986                  */
9987                 val = tr32(TG3PCI_PCISTATE);
9988                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9989                        PCISTATE_ALLOW_APE_SHMEM_WR |
9990                        PCISTATE_ALLOW_APE_PSPACE_WR;
9991                 tw32(TG3PCI_PCISTATE, val);
9992         }
9993
9994         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9995                 /* Enable some hw fixes.  */
9996                 val = tr32(TG3PCI_MSI_DATA);
9997                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9998                 tw32(TG3PCI_MSI_DATA, val);
9999         }
10000
10001         /* Descriptor ring init may make accesses to the
10002          * NIC SRAM area to set up the TX descriptors, so we
10003          * can only do this after the hardware has been
10004          * successfully reset.
10005          */
10006         err = tg3_init_rings(tp);
10007         if (err)
10008                 return err;
10009
10010         if (tg3_flag(tp, 57765_PLUS)) {
10011                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10012                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10013                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10014                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10015                 if (!tg3_flag(tp, 57765_CLASS) &&
10016                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10017                     tg3_asic_rev(tp) != ASIC_REV_5762)
10018                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10019                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10020         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10021                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10022                 /* This value is determined during the probe-time DMA
10023                  * engine test, tg3_test_dma.
10024                  */
10025                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10026         }
10027
10028         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10029                           GRC_MODE_4X_NIC_SEND_RINGS |
10030                           GRC_MODE_NO_TX_PHDR_CSUM |
10031                           GRC_MODE_NO_RX_PHDR_CSUM);
10032         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10033
10034         /* Pseudo-header checksum is done by hardware logic and not
10035          * the offload processors, so make the chip do the pseudo-
10036          * header checksums on receive.  For transmit it is more
10037          * convenient to do the pseudo-header checksum in software
10038          * as Linux does that on transmit for us in all cases.
10039          */
10040         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10041
10042         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10043         if (tp->rxptpctl)
10044                 tw32(TG3_RX_PTP_CTL,
10045                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10046
10047         if (tg3_flag(tp, PTP_CAPABLE))
10048                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10049
10050         tw32(GRC_MODE, tp->grc_mode | val);
10051
10052         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
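        /* Presumably the prescaler divides by N + 1, so a value of 65 turns
         * the 66 MHz clock into a 1 MHz timer tick.
         */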
10053         val = tr32(GRC_MISC_CFG);
10054         val &= ~0xff;
10055         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10056         tw32(GRC_MISC_CFG, val);
10057
10058         /* Initialize MBUF/DESC pool. */
10059         if (tg3_flag(tp, 5750_PLUS)) {
10060                 /* Do nothing.  */
10061         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10062                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10063                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10064                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10065                 else
10066                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10067                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10068                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10069         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10070                 int fw_len;
10071
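                /* Round the firmware length up to the next 128-byte
                 * boundary; the mbuf pool is carved out of SRAM just past
                 * the TSO firmware image.
                 */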
10072                 fw_len = tp->fw_len;
10073                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10074                 tw32(BUFMGR_MB_POOL_ADDR,
10075                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10076                 tw32(BUFMGR_MB_POOL_SIZE,
10077                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10078         }
10079
10080         if (tp->dev->mtu <= ETH_DATA_LEN) {
10081                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10082                      tp->bufmgr_config.mbuf_read_dma_low_water);
10083                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10084                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10085                 tw32(BUFMGR_MB_HIGH_WATER,
10086                      tp->bufmgr_config.mbuf_high_water);
10087         } else {
10088                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10089                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10090                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10091                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10092                 tw32(BUFMGR_MB_HIGH_WATER,
10093                      tp->bufmgr_config.mbuf_high_water_jumbo);
10094         }
10095         tw32(BUFMGR_DMA_LOW_WATER,
10096              tp->bufmgr_config.dma_low_water);
10097         tw32(BUFMGR_DMA_HIGH_WATER,
10098              tp->bufmgr_config.dma_high_water);
10099
10100         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10101         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10102                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10103         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10104             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10105             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10106             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10107                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10108         tw32(BUFMGR_MODE, val);
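        /* Poll up to 20 ms (2000 iterations of 10 us) for the buffer
         * manager to report itself enabled.
         */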
10109         for (i = 0; i < 2000; i++) {
10110                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10111                         break;
10112                 udelay(10);
10113         }
10114         if (i >= 2000) {
10115                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10116                 return -ENODEV;
10117         }
10118
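        /* 5906 A1 only: force the low two bits of ISO_PKT_TX to 10b.  The
         * bit meanings are not documented here; treat this as
         * chip-rev-specific tuning.
         */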
10119         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10120                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10121
10122         tg3_setup_rxbd_thresholds(tp);
10123
10124         /* Initialize TG3_BDINFO's at:
10125          *  RCVDBDI_STD_BD:     standard eth size rx ring
10126          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10127          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10128          *
10129          * like so:
10130          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10131          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10132          *                              ring attribute flags
10133          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10134          *
10135          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10136          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10137          *
10138          * The size of each ring is fixed in the firmware, but the location is
10139          * configurable.
10140          */
10141         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10142              ((u64) tpr->rx_std_mapping >> 32));
10143         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10144              ((u64) tpr->rx_std_mapping & 0xffffffff));
10145         if (!tg3_flag(tp, 5717_PLUS))
10146                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10147                      NIC_SRAM_RX_BUFFER_DESC);
10148
10149         /* Disable the mini ring */
10150         if (!tg3_flag(tp, 5705_PLUS))
10151                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10152                      BDINFO_FLAGS_DISABLED);
10153
10154         /* Program the jumbo buffer descriptor ring control
10155          * blocks on those devices that have them.
10156          */
10157         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10158             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10159
10160                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10161                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10162                              ((u64) tpr->rx_jmb_mapping >> 32));
10163                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10164                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10165                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10166                               BDINFO_FLAGS_MAXLEN_SHIFT;
10167                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10168                              val | BDINFO_FLAGS_USE_EXT_RECV);
10169                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10170                             tg3_flag(tp, 57765_CLASS) ||
10171                             tg3_asic_rev(tp) == ASIC_REV_5762)
10172                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10173                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10174                 } else {
10175                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10176                              BDINFO_FLAGS_DISABLED);
10177                 }
10178
10179                 if (tg3_flag(tp, 57765_PLUS)) {
10180                         val = TG3_RX_STD_RING_SIZE(tp);
10181                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10182                         val |= (TG3_RX_STD_DMA_SZ << 2);
10183                 } else
10184                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10185         } else
10186                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10187
10188         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10189
10190         tpr->rx_std_prod_idx = tp->rx_pending;
10191         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10192
10193         tpr->rx_jmb_prod_idx =
10194                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10195         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10196
10197         tg3_rings_reset(tp);
10198
10199         /* Initialize MAC address and backoff seed. */
10200         __tg3_set_mac_addr(tp, false);
10201
10202         /* MTU + ethernet header + FCS + optional VLAN tag */
10203         tw32(MAC_RX_MTU_SIZE,
10204              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10205
10206         /* The slot time is changed by tg3_setup_phy if we
10207          * run at gigabit with half duplex.
10208          */
10209         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10210               (6 << TX_LENGTHS_IPG_SHIFT) |
10211               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10212
10213         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10214             tg3_asic_rev(tp) == ASIC_REV_5762)
10215                 val |= tr32(MAC_TX_LENGTHS) &
10216                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10217                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10218
10219         tw32(MAC_TX_LENGTHS, val);
10220
10221         /* Receive rules. */
10222         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10223         tw32(RCVLPC_CONFIG, 0x0181);
10224
10225         /* Calculate the RDMAC_MODE setting early; we need it to determine
10226          * the RCVLPC_STATE_ENABLE mask.
10227          */
10228         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10229                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10230                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10231                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10232                       RDMAC_MODE_LNGREAD_ENAB);
10233
10234         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10235                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10236
10237         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10238             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10239             tg3_asic_rev(tp) == ASIC_REV_57780)
10240                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10241                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10242                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10243
10244         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10245             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10246                 if (tg3_flag(tp, TSO_CAPABLE) &&
10247                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10248                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10249                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10250                            !tg3_flag(tp, IS_5788)) {
10251                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10252                 }
10253         }
10254
10255         if (tg3_flag(tp, PCI_EXPRESS))
10256                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10257
10258         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10259                 tp->dma_limit = 0;
10260                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10261                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10262                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10263                 }
10264         }
10265
10266         if (tg3_flag(tp, HW_TSO_1) ||
10267             tg3_flag(tp, HW_TSO_2) ||
10268             tg3_flag(tp, HW_TSO_3))
10269                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10270
10271         if (tg3_flag(tp, 57765_PLUS) ||
10272             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10273             tg3_asic_rev(tp) == ASIC_REV_57780)
10274                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10275
10276         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10277             tg3_asic_rev(tp) == ASIC_REV_5762)
10278                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10279
10280         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10281             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10282             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10283             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10284             tg3_flag(tp, 57765_PLUS)) {
10285                 u32 tgtreg;
10286
10287                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10288                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10289                 else
10290                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10291
10292                 val = tr32(tgtreg);
10293                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10294                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10295                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10296                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10297                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10298                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10299                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10300                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10301                 }
10302                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10303         }
10304
10305         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10306             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10307             tg3_asic_rev(tp) == ASIC_REV_5762) {
10308                 u32 tgtreg;
10309
10310                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10311                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10312                 else
10313                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10314
10315                 val = tr32(tgtreg);
10316                 tw32(tgtreg, val |
10317                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10318                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10319         }
10320
10321         /* Receive/send statistics. */
10322         if (tg3_flag(tp, 5750_PLUS)) {
10323                 val = tr32(RCVLPC_STATS_ENABLE);
10324                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10325                 tw32(RCVLPC_STATS_ENABLE, val);
10326         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10327                    tg3_flag(tp, TSO_CAPABLE)) {
10328                 val = tr32(RCVLPC_STATS_ENABLE);
10329                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10330                 tw32(RCVLPC_STATS_ENABLE, val);
10331         } else {
10332                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10333         }
10334         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10335         tw32(SNDDATAI_STATSENAB, 0xffffff);
10336         tw32(SNDDATAI_STATSCTRL,
10337              (SNDDATAI_SCTRL_ENABLE |
10338               SNDDATAI_SCTRL_FASTUPD));
10339
10340         /* Setup host coalescing engine. */
10341         tw32(HOSTCC_MODE, 0);
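        /* Wait up to 20 ms for the coalescing engine to go idle before
         * reprogramming its parameters.
         */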
10342         for (i = 0; i < 2000; i++) {
10343                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10344                         break;
10345                 udelay(10);
10346         }
10347
10348         __tg3_set_coalesce(tp, &tp->coal);
10349
10350         if (!tg3_flag(tp, 5705_PLUS)) {
10351                 /* Status/statistics block address.  See tg3_timer,
10352                  * the tg3_periodic_fetch_stats call there, and
10353                  * tg3_get_stats to see how this works for 5705/5750 chips.
10354                  */
10355                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10356                      ((u64) tp->stats_mapping >> 32));
10357                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10358                      ((u64) tp->stats_mapping & 0xffffffff));
10359                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10360
10361                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10362
10363                 /* Clear statistics and status block memory areas */
10364                 for (i = NIC_SRAM_STATS_BLK;
10365                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10366                      i += sizeof(u32)) {
10367                         tg3_write_mem(tp, i, 0);
10368                         udelay(40);
10369                 }
10370         }
10371
10372         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10373
10374         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10375         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10376         if (!tg3_flag(tp, 5705_PLUS))
10377                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10378
10379         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10380                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10381                 /* Reset to prevent intermittently losing the first rx packet. */
10382                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10383                 udelay(10);
10384         }
10385
10386         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10387                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10388                         MAC_MODE_FHDE_ENABLE;
10389         if (tg3_flag(tp, ENABLE_APE))
10390                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10391         if (!tg3_flag(tp, 5705_PLUS) &&
10392             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10393             tg3_asic_rev(tp) != ASIC_REV_5700)
10394                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10395         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10396         udelay(40);
10397
10398         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10399          * If TG3_FLAG_IS_NIC is zero, we should read the
10400          * register to preserve the GPIO settings for LOMs. The GPIOs,
10401          * whether used as inputs or outputs, are set by boot code after
10402          * reset.
10403          */
10404         if (!tg3_flag(tp, IS_NIC)) {
10405                 u32 gpio_mask;
10406
10407                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10408                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10409                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10410
10411                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10412                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10413                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10414
10415                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10416                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10417
10418                 tp->grc_local_ctrl &= ~gpio_mask;
10419                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10420
10421                 /* GPIO1 must be driven high for eeprom write protect */
10422                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10423                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10424                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10425         }
10426         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10427         udelay(100);
10428
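        /* With MSI-X, enable message interrupts, use multi-vector mode when
         * more than one vector is active, and disable one-shot mode unless
         * the 1SHOT_MSI flag says the chip can use it.
         */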
10429         if (tg3_flag(tp, USING_MSIX)) {
10430                 val = tr32(MSGINT_MODE);
10431                 val |= MSGINT_MODE_ENABLE;
10432                 if (tp->irq_cnt > 1)
10433                         val |= MSGINT_MODE_MULTIVEC_EN;
10434                 if (!tg3_flag(tp, 1SHOT_MSI))
10435                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10436                 tw32(MSGINT_MODE, val);
10437         }
10438
10439         if (!tg3_flag(tp, 5705_PLUS)) {
10440                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10441                 udelay(40);
10442         }
10443
10444         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10445                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10446                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10447                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10448                WDMAC_MODE_LNGREAD_ENAB);
10449
10450         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10451             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10452                 if (tg3_flag(tp, TSO_CAPABLE) &&
10453                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10454                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10455                         /* nothing */
10456                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10457                            !tg3_flag(tp, IS_5788)) {
10458                         val |= WDMAC_MODE_RX_ACCEL;
10459                 }
10460         }
10461
10462         /* Enable host coalescing bug fix */
10463         if (tg3_flag(tp, 5755_PLUS))
10464                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10465
10466         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10467                 val |= WDMAC_MODE_BURST_ALL_DATA;
10468
10469         tw32_f(WDMAC_MODE, val);
10470         udelay(40);
10471
10472         if (tg3_flag(tp, PCIX_MODE)) {
10473                 u16 pcix_cmd;
10474
10475                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10476                                      &pcix_cmd);
10477                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10478                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10479                         pcix_cmd |= PCI_X_CMD_READ_2K;
10480                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10481                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10482                         pcix_cmd |= PCI_X_CMD_READ_2K;
10483                 }
10484                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10485                                       pcix_cmd);
10486         }
10487
10488         tw32_f(RDMAC_MODE, rdmac_mode);
10489         udelay(40);
10490
10491         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10492             tg3_asic_rev(tp) == ASIC_REV_5720) {
10493                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10494                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10495                                 break;
10496                 }
10497                 if (i < TG3_NUM_RDMA_CHANNELS) {
10498                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10499                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10500                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10501                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10502                 }
10503         }
10504
10505         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10506         if (!tg3_flag(tp, 5705_PLUS))
10507                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10508
10509         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10510                 tw32(SNDDATAC_MODE,
10511                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10512         else
10513                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10514
10515         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10516         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10517         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10518         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10519                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10520         tw32(RCVDBDI_MODE, val);
10521         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10522         if (tg3_flag(tp, HW_TSO_1) ||
10523             tg3_flag(tp, HW_TSO_2) ||
10524             tg3_flag(tp, HW_TSO_3))
10525                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10526         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10527         if (tg3_flag(tp, ENABLE_TSS))
10528                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10529         tw32(SNDBDI_MODE, val);
10530         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10531
10532         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10533                 err = tg3_load_5701_a0_firmware_fix(tp);
10534                 if (err)
10535                         return err;
10536         }
10537
10538         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10539                 /* Ignore any errors from the firmware download.  If the
10540                  * download fails, the device will operate with EEE disabled.
10541                  */
10542                 tg3_load_57766_firmware(tp);
10543         }
10544
10545         if (tg3_flag(tp, TSO_CAPABLE)) {
10546                 err = tg3_load_tso_firmware(tp);
10547                 if (err)
10548                         return err;
10549         }
10550
10551         tp->tx_mode = TX_MODE_ENABLE;
10552
10553         if (tg3_flag(tp, 5755_PLUS) ||
10554             tg3_asic_rev(tp) == ASIC_REV_5906)
10555                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10556
10557         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10558             tg3_asic_rev(tp) == ASIC_REV_5762) {
10559                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10560                 tp->tx_mode &= ~val;
10561                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10562         }
10563
10564         tw32_f(MAC_TX_MODE, tp->tx_mode);
10565         udelay(100);
10566
10567         if (tg3_flag(tp, ENABLE_RSS)) {
10568                 u32 rss_key[10];
10569
10570                 tg3_rss_write_indir_tbl(tp);
10571
10572                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10573
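                /* Load the 40-byte random key into the ten 32-bit
                 * MAC_RSS_HASH_KEY registers.
                 */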
10574                 for (i = 0; i < 10; i++)
10575                         tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10576         }
10577
10578         tp->rx_mode = RX_MODE_ENABLE;
10579         if (tg3_flag(tp, 5755_PLUS))
10580                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10581
10582         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10583                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10584
10585         if (tg3_flag(tp, ENABLE_RSS))
10586                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10587                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10588                                RX_MODE_RSS_IPV6_HASH_EN |
10589                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10590                                RX_MODE_RSS_IPV4_HASH_EN |
10591                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10592
10593         tw32_f(MAC_RX_MODE, tp->rx_mode);
10594         udelay(10);
10595
10596         tw32(MAC_LED_CTRL, tp->led_ctrl);
10597
10598         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10599         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10600                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10601                 udelay(10);
10602         }
10603         tw32_f(MAC_RX_MODE, tp->rx_mode);
10604         udelay(10);
10605
10606         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10607                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10608                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10609                         /* Set drive transmission level to 1.2V  */
10610                         /* only if the signal pre-emphasis bit is not set  */
10611                         val = tr32(MAC_SERDES_CFG);
10612                         val &= 0xfffff000;
10613                         val |= 0x880;
10614                         tw32(MAC_SERDES_CFG, val);
10615                 }
10616                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10617                         tw32(MAC_SERDES_CFG, 0x616000);
10618         }
10619
10620         /* Prevent chip from dropping frames when flow control
10621          * is enabled.
10622          */
10623         if (tg3_flag(tp, 57765_CLASS))
10624                 val = 1;
10625         else
10626                 val = 2;
10627         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10628
10629         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10630             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10631                 /* Use hardware link auto-negotiation */
10632                 tg3_flag_set(tp, HW_AUTONEG);
10633         }
10634
10635         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10636             tg3_asic_rev(tp) == ASIC_REV_5714) {
10637                 u32 tmp;
10638
10639                 tmp = tr32(SERDES_RX_CTRL);
10640                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10641                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10642                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10643                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10644         }
10645
10646         if (!tg3_flag(tp, USE_PHYLIB)) {
10647                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10648                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10649
10650                 err = tg3_setup_phy(tp, false);
10651                 if (err)
10652                         return err;
10653
10654                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10655                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10656                         u32 tmp;
10657
10658                         /* Clear CRC stats. */
10659                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10660                                 tg3_writephy(tp, MII_TG3_TEST1,
10661                                              tmp | MII_TG3_TEST1_CRC_EN);
10662                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10663                         }
10664                 }
10665         }
10666
10667         __tg3_set_rx_mode(tp->dev);
10668
10669         /* Initialize receive rules. */
10670         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10671         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10672         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10673         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10674
10675         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10676                 limit = 8;
10677         else
10678                 limit = 16;
10679         if (tg3_flag(tp, ENABLE_ASF))
10680                 limit -= 4;
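        /* The switch cases fall through on purpose: starting at the last
         * available slot, every rule down to slot 4 is cleared.  Slots 2
         * and 3 are deliberately left alone, and slots 0 and 1 were
         * programmed above.
         */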
10681         switch (limit) {
10682         case 16:
10683                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10684         case 15:
10685                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10686         case 14:
10687                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10688         case 13:
10689                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10690         case 12:
10691                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10692         case 11:
10693                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10694         case 10:
10695                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10696         case 9:
10697                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10698         case 8:
10699                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10700         case 7:
10701                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10702         case 6:
10703                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10704         case 5:
10705                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10706         case 4:
10707                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10708         case 3:
10709                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10710         case 2:
10711         case 1:
10712
10713         default:
10714                 break;
10715         }
10716
10717         if (tg3_flag(tp, ENABLE_APE))
10718                 /* Write our heartbeat update interval (here: disabled) to the APE. */
10719                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10720                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10721
10722         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10723
10724         return 0;
10725 }
10726
10727 /* Called at device open time to get the chip ready for
10728  * packet processing.  Invoked with tp->lock held.
10729  */
10730 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10731 {
10732         /* Chip may have been just powered on. If so, the boot code may still
10733          * be running initialization. Wait for it to finish to avoid races in
10734          * accessing the hardware.
10735          */
10736         tg3_enable_register_access(tp);
10737         tg3_poll_fw(tp);
10738
10739         tg3_switch_clocks(tp);
10740
10741         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10742
10743         return tg3_reset_hw(tp, reset_phy);
10744 }
10745
10746 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10747 {
10748         int i;
10749
10750         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10751                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10752
10753                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10754                 off += len;
10755
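                /* Zero any record without a valid signature or the active
                 * flag so callers see a uniform array.
                 */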
10756                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10757                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10758                         memset(ocir, 0, TG3_OCIR_LEN);
10759         }
10760 }
10761
10762 /* sysfs attributes for hwmon */
10763 static ssize_t tg3_show_temp(struct device *dev,
10764                              struct device_attribute *devattr, char *buf)
10765 {
10766         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10767         struct tg3 *tp = dev_get_drvdata(dev);
10768         u32 temperature;
10769
10770         spin_lock_bh(&tp->lock);
10771         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10772                                 sizeof(temperature));
10773         spin_unlock_bh(&tp->lock);
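        /* The sensor value is in whole degrees Celsius; the hwmon sysfs
         * ABI wants millidegrees, hence the multiply by 1000.
         */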
10774         return sprintf(buf, "%u\n", temperature * 1000);
10775 }
10776
10777
10778 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10779                           TG3_TEMP_SENSOR_OFFSET);
10780 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10781                           TG3_TEMP_CAUTION_OFFSET);
10782 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10783                           TG3_TEMP_MAX_OFFSET);
10784
10785 static struct attribute *tg3_attrs[] = {
10786         &sensor_dev_attr_temp1_input.dev_attr.attr,
10787         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10788         &sensor_dev_attr_temp1_max.dev_attr.attr,
10789         NULL
10790 };
10791 ATTRIBUTE_GROUPS(tg3);
10792
10793 static void tg3_hwmon_close(struct tg3 *tp)
10794 {
10795         if (tp->hwmon_dev) {
10796                 hwmon_device_unregister(tp->hwmon_dev);
10797                 tp->hwmon_dev = NULL;
10798         }
10799 }
10800
10801 static void tg3_hwmon_open(struct tg3 *tp)
10802 {
10803         int i;
10804         u32 size = 0;
10805         struct pci_dev *pdev = tp->pdev;
10806         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10807
10808         tg3_sd_scan_scratchpad(tp, ocirs);
10809
10810         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10811                 if (!ocirs[i].src_data_length)
10812                         continue;
10813
10814                 size += ocirs[i].src_hdr_length;
10815                 size += ocirs[i].src_data_length;
10816         }
10817
10818         if (!size)
10819                 return;
10820
10821         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10822                                                           tp, tg3_groups);
10823         if (IS_ERR(tp->hwmon_dev)) {
10824                 tp->hwmon_dev = NULL;
10825                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10826         }
10827 }
10828
10829
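/* Fold a 32-bit hardware counter into a 64-bit software value.  If the low
 * word wraps during the addition (the sum ends up below the value just
 * added), carry one into the high word.
 */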
10830 #define TG3_STAT_ADD32(PSTAT, REG) \
10831 do {    u32 __val = tr32(REG); \
10832         (PSTAT)->low += __val; \
10833         if ((PSTAT)->low < __val) \
10834                 (PSTAT)->high += 1; \
10835 } while (0)
10836
10837 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10838 {
10839         struct tg3_hw_stats *sp = tp->hw_stats;
10840
10841         if (!tp->link_up)
10842                 return;
10843
10844         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10845         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10846         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10847         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10848         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10849         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10850         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10851         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10852         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10853         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10854         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10855         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10856         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10857         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10858                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10859                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10860                 u32 val;
10861
10862                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10863                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10864                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10865                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10866         }
10867
10868         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10869         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10870         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10871         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10872         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10873         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10874         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10875         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10876         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10877         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10878         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10879         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10880         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10881         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10882
10883         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10884         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10885             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10886             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10887             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10888                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10889         } else {
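                /* These chip revs apparently lack a usable
                 * RCVLPC_IN_DISCARDS_CNT; approximate rx discards by
                 * counting mbuf low-watermark attention events, at most
                 * one per poll.
                 */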
10890                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10891                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10892                 if (val) {
10893                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10894                         sp->rx_discards.low += val;
10895                         if (sp->rx_discards.low < val)
10896                                 sp->rx_discards.high += 1;
10897                 }
10898                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10899         }
10900         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10901 }
10902
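/* Work around occasionally dropped MSIs: if a vector reports work pending
 * but its rings have not advanced since the previous timer tick, invoke
 * the handler by hand.
 */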
10903 static void tg3_chk_missed_msi(struct tg3 *tp)
10904 {
10905         u32 i;
10906
10907         for (i = 0; i < tp->irq_cnt; i++) {
10908                 struct tg3_napi *tnapi = &tp->napi[i];
10909
10910                 if (tg3_has_work(tnapi)) {
10911                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10912                             tnapi->last_tx_cons == tnapi->tx_cons) {
10913                                 if (tnapi->chk_msi_cnt < 1) {
10914                                         tnapi->chk_msi_cnt++;
10915                                         return;
10916                                 }
10917                                 tg3_msi(0, tnapi);
10918                         }
10919                 }
10920                 tnapi->chk_msi_cnt = 0;
10921                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10922                 tnapi->last_tx_cons = tnapi->tx_cons;
10923         }
10924 }
10925
10926 static void tg3_timer(unsigned long __opaque)
10927 {
10928         struct tg3 *tp = (struct tg3 *) __opaque;
10929
10930         spin_lock(&tp->lock);
10931
10932         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10933                 spin_unlock(&tp->lock);
10934                 goto restart_timer;
10935         }
10936
10937         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10938             tg3_flag(tp, 57765_CLASS))
10939                 tg3_chk_missed_msi(tp);
10940
10941         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10942                 /* BCM4785: Flush posted writes from GbE to host memory. */
10943                 tr32(HOSTCC_MODE);
10944         }
10945
10946         if (!tg3_flag(tp, TAGGED_STATUS)) {
10947                 /* All of this garbage is because, when using non-tagged
10948                  * IRQ status, the mailbox/status_block protocol the chip
10949                  * uses with the CPU is race prone.
10950                  */
10951                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10952                         tw32(GRC_LOCAL_CTRL,
10953                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10954                 } else {
10955                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10956                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10957                 }
10958
10959                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10960                         spin_unlock(&tp->lock);
10961                         tg3_reset_task_schedule(tp);
10962                         goto restart_timer;
10963                 }
10964         }
10965
10966         /* This part only runs once per second. */
10967         if (!--tp->timer_counter) {
10968                 if (tg3_flag(tp, 5705_PLUS))
10969                         tg3_periodic_fetch_stats(tp);
10970
10971                 if (tp->setlpicnt && !--tp->setlpicnt)
10972                         tg3_phy_eee_enable(tp);
10973
10974                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10975                         u32 mac_stat;
10976                         int phy_event;
10977
10978                         mac_stat = tr32(MAC_STATUS);
10979
10980                         phy_event = 0;
10981                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10982                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10983                                         phy_event = 1;
10984                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10985                                 phy_event = 1;
10986
10987                         if (phy_event)
10988                                 tg3_setup_phy(tp, false);
10989                 } else if (tg3_flag(tp, POLL_SERDES)) {
10990                         u32 mac_stat = tr32(MAC_STATUS);
10991                         int need_setup = 0;
10992
10993                         if (tp->link_up &&
10994                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10995                                 need_setup = 1;
10996                         }
10997                         if (!tp->link_up &&
10998                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10999                                          MAC_STATUS_SIGNAL_DET))) {
11000                                 need_setup = 1;
11001                         }
11002                         if (need_setup) {
11003                                 if (!tp->serdes_counter) {
11004                                         tw32_f(MAC_MODE,
11005                                              (tp->mac_mode &
11006                                               ~MAC_MODE_PORT_MODE_MASK));
11007                                         udelay(40);
11008                                         tw32_f(MAC_MODE, tp->mac_mode);
11009                                         udelay(40);
11010                                 }
11011                                 tg3_setup_phy(tp, false);
11012                         }
11013                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11014                            tg3_flag(tp, 5780_CLASS)) {
11015                         tg3_serdes_parallel_detect(tp);
11016                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11017                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11018                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11019                                          TG3_CPMU_STATUS_LINK_MASK);
11020
11021                         if (link_up != tp->link_up)
11022                                 tg3_setup_phy(tp, false);
11023                 }
11024
11025                 tp->timer_counter = tp->timer_multiplier;
11026         }
11027
11028         /* Heartbeat is only sent once every 2 seconds.
11029          *
11030          * The heartbeat is to tell the ASF firmware that the host
11031          * driver is still alive.  In the event that the OS crashes,
11032          * ASF needs to reset the hardware to free up the FIFO space
11033          * that may be filled with rx packets destined for the host.
11034          * If the FIFO is full, ASF will no longer function properly.
11035          *
11036          * Unintended resets have been reported on real-time kernels
11037          * where the timer doesn't run on time.  Netpoll will have the
11038          * same problem.
11039          *
11040          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11041          * to check the ring condition when the heartbeat is expiring
11042          * before doing the reset.  This will prevent most unintended
11043          * resets.
11044          */
11045         if (!--tp->asf_counter) {
11046                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11047                         tg3_wait_for_event_ack(tp);
11048
11049                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11050                                       FWCMD_NICDRV_ALIVE3);
11051                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11052                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11053                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11054
11055                         tg3_generate_fw_event(tp);
11056                 }
11057                 tp->asf_counter = tp->asf_multiplier;
11058         }
11059
11060         spin_unlock(&tp->lock);
11061
11062 restart_timer:
11063         tp->timer.expires = jiffies + tp->timer_offset;
11064         add_timer(&tp->timer);
11065 }
11066
11067 static void tg3_timer_init(struct tg3 *tp)
11068 {
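        /* Chips with reliable tagged status only need the watchdog once a
         * second; everything else is polled ten times a second, presumably
         * to catch missed interrupts quickly.
         */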
11069         if (tg3_flag(tp, TAGGED_STATUS) &&
11070             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11071             !tg3_flag(tp, 57765_CLASS))
11072                 tp->timer_offset = HZ;
11073         else
11074                 tp->timer_offset = HZ / 10;
11075
11076         BUG_ON(tp->timer_offset > HZ);
11077
11078         tp->timer_multiplier = (HZ / tp->timer_offset);
11079         tp->asf_multiplier = (HZ / tp->timer_offset) *
11080                              TG3_FW_UPDATE_FREQ_SEC;
11081
11082         init_timer(&tp->timer);
11083         tp->timer.data = (unsigned long) tp;
11084         tp->timer.function = tg3_timer;
11085 }
11086
11087 static void tg3_timer_start(struct tg3 *tp)
11088 {
11089         tp->asf_counter   = tp->asf_multiplier;
11090         tp->timer_counter = tp->timer_multiplier;
11091
11092         tp->timer.expires = jiffies + tp->timer_offset;
11093         add_timer(&tp->timer);
11094 }
11095
11096 static void tg3_timer_stop(struct tg3 *tp)
11097 {
11098         del_timer_sync(&tp->timer);
11099 }
11100
11101 /* Restart hardware after configuration changes, self-test, etc.
11102  * Invoked with tp->lock held.
11103  */
11104 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11105         __releases(tp->lock)
11106         __acquires(tp->lock)
11107 {
11108         int err;
11109
11110         err = tg3_init_hw(tp, reset_phy);
11111         if (err) {
11112                 netdev_err(tp->dev,
11113                            "Failed to re-initialize device, aborting\n");
11114                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11115                 tg3_full_unlock(tp);
11116                 tg3_timer_stop(tp);
11117                 tp->irq_sync = 0;
11118                 tg3_napi_enable(tp);
11119                 dev_close(tp->dev);
11120                 tg3_full_lock(tp, 0);
11121         }
11122         return err;
11123 }
11124
11125 static void tg3_reset_task(struct work_struct *work)
11126 {
11127         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11128         int err;
11129
11130         rtnl_lock();
11131         tg3_full_lock(tp, 0);
11132
11133         if (!netif_running(tp->dev)) {
11134                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11135                 tg3_full_unlock(tp);
11136                 rtnl_unlock();
11137                 return;
11138         }
11139
11140         tg3_full_unlock(tp);
11141
11142         tg3_phy_stop(tp);
11143
11144         tg3_netif_stop(tp);
11145
11146         tg3_full_lock(tp, 1);
11147
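        /* After a TX hang, fall back to flushed mailbox writes and the
         * write-reorder workaround, on the assumption that posted writes
         * were the culprit.
         */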
11148         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11149                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11150                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11151                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11152                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11153         }
11154
11155         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11156         err = tg3_init_hw(tp, true);
11157         if (err)
11158                 goto out;
11159
11160         tg3_netif_start(tp);
11161
11162 out:
11163         tg3_full_unlock(tp);
11164
11165         if (!err)
11166                 tg3_phy_start(tp);
11167
11168         tg3_flag_clear(tp, RESET_TASK_PENDING);
11169         rtnl_unlock();
11170 }
11171
11172 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11173 {
11174         irq_handler_t fn;
11175         unsigned long flags;
11176         char *name;
11177         struct tg3_napi *tnapi = &tp->napi[irq_num];
11178
11179         if (tp->irq_cnt == 1)
11180                 name = tp->dev->name;
11181         else {
11182                 name = &tnapi->irq_lbl[0];
11183                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11184                         snprintf(name, IFNAMSIZ,
11185                                  "%s-txrx-%d", tp->dev->name, irq_num);
11186                 else if (tnapi->tx_buffers)
11187                         snprintf(name, IFNAMSIZ,
11188                                  "%s-tx-%d", tp->dev->name, irq_num);
11189                 else if (tnapi->rx_rcb)
11190                         snprintf(name, IFNAMSIZ,
11191                                  "%s-rx-%d", tp->dev->name, irq_num);
11192                 else
11193                         snprintf(name, IFNAMSIZ,
11194                                  "%s-%d", tp->dev->name, irq_num);
11195                 name[IFNAMSIZ-1] = 0;
11196         }
11197
11198         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11199                 fn = tg3_msi;
11200                 if (tg3_flag(tp, 1SHOT_MSI))
11201                         fn = tg3_msi_1shot;
11202                 flags = 0;
11203         } else {
11204                 fn = tg3_interrupt;
11205                 if (tg3_flag(tp, TAGGED_STATUS))
11206                         fn = tg3_interrupt_tagged;
11207                 flags = IRQF_SHARED;
11208         }
11209
11210         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11211 }
11212
11213 static int tg3_test_interrupt(struct tg3 *tp)
11214 {
11215         struct tg3_napi *tnapi = &tp->napi[0];
11216         struct net_device *dev = tp->dev;
11217         int err, i, intr_ok = 0;
11218         u32 val;
11219
11220         if (!netif_running(dev))
11221                 return -ENODEV;
11222
11223         tg3_disable_ints(tp);
11224
11225         free_irq(tnapi->irq_vec, tnapi);
11226
11227         /*
11228          * Turn off MSI one-shot mode.  Otherwise this test has no
11229          * observable way to tell whether the interrupt was delivered.
11230          */
11231         if (tg3_flag(tp, 57765_PLUS)) {
11232                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11233                 tw32(MSGINT_MODE, val);
11234         }
11235
11236         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11237                           IRQF_SHARED, dev->name, tnapi);
11238         if (err)
11239                 return err;
11240
11241         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11242         tg3_enable_ints(tp);
11243
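        /* Kick the coalescing engine in "now" mode to force an immediate
         * interrupt for the test ISR to observe.
         */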
11244         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11245                tnapi->coal_now);
11246
11247         for (i = 0; i < 5; i++) {
11248                 u32 int_mbox, misc_host_ctrl;
11249
11250                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11251                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11252
11253                 if ((int_mbox != 0) ||
11254                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11255                         intr_ok = 1;
11256                         break;
11257                 }
11258
11259                 if (tg3_flag(tp, 57765_PLUS) &&
11260                     tnapi->hw_status->status_tag != tnapi->last_tag)
11261                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11262
11263                 msleep(10);
11264         }
11265
11266         tg3_disable_ints(tp);
11267
11268         free_irq(tnapi->irq_vec, tnapi);
11269
11270         err = tg3_request_irq(tp, 0);
11271
11272         if (err)
11273                 return err;
11274
11275         if (intr_ok) {
11276                 /* Re-enable MSI one-shot mode. */
11277                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11278                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11279                         tw32(MSGINT_MODE, val);
11280                 }
11281                 return 0;
11282         }
11283
11284         return -EIO;
11285 }
11286
11287 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and INTx
11288  * mode is successfully restored.
11289  */
11290 static int tg3_test_msi(struct tg3 *tp)
11291 {
11292         int err;
11293         u16 pci_cmd;
11294
11295         if (!tg3_flag(tp, USING_MSI))
11296                 return 0;
11297
11298         /* Turn off SERR reporting in case MSI terminates with Master
11299          * Abort.
11300          */
11301         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11302         pci_write_config_word(tp->pdev, PCI_COMMAND,
11303                               pci_cmd & ~PCI_COMMAND_SERR);
11304
11305         err = tg3_test_interrupt(tp);
11306
11307         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11308
11309         if (!err)
11310                 return 0;
11311
11312         /* other failures */
11313         if (err != -EIO)
11314                 return err;
11315
11316         /* MSI test failed, go back to INTx mode */
11317         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11318                     "to INTx mode. Please report this failure to the PCI "
11319                     "maintainer and include system chipset information\n");
11320
11321         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11322
11323         pci_disable_msi(tp->pdev);
11324
11325         tg3_flag_clear(tp, USING_MSI);
11326         tp->napi[0].irq_vec = tp->pdev->irq;
11327
11328         err = tg3_request_irq(tp, 0);
11329         if (err)
11330                 return err;
11331
11332         /* Need to reset the chip because the MSI cycle may have terminated
11333          * with Master Abort.
11334          */
11335         tg3_full_lock(tp, 1);
11336
11337         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11338         err = tg3_init_hw(tp, true);
11339
11340         tg3_full_unlock(tp);
11341
11342         if (err)
11343                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11344
11345         return err;
11346 }
11347
11348 static int tg3_request_firmware(struct tg3 *tp)
11349 {
11350         const struct tg3_firmware_hdr *fw_hdr;
11351
11352         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11353                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11354                            tp->fw_needed);
11355                 return -ENOENT;
11356         }
11357
11358         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11359
11360         /* Firmware blob starts with version numbers, followed by
11361          * start address and _full_ length including BSS sections
11362          * (which must be at least as long as the actual data, of course).
11363          */
11364
11365         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11366         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11367                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11368                            tp->fw_len, tp->fw_needed);
11369                 release_firmware(tp->fw);
11370                 tp->fw = NULL;
11371                 return -EINVAL;
11372         }
11373
11374         /* We no longer need firmware; we have it. */
11375         tp->fw_needed = NULL;
11376         return 0;
11377 }
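
/* Editor's note: the blob layout described in the comment above, with
 * field names as assumed from struct tg3_firmware_hdr (all words are
 * big-endian on disk):
 *
 *      offset 0x0:  version
 *      offset 0x4:  base_addr (load address on the NIC's internal CPU)
 *      offset 0x8:  len       (full image length, including BSS)
 *      offset 0xc:  start of the actual firmware data
 */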
11378
11379 static u32 tg3_irq_count(struct tg3 *tp)
11380 {
11381         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11382
11383         if (irq_cnt > 1) {
11384                 /* We want as many rx rings enabled as there are cpus.
11385                  * In multiqueue MSI-X mode, the first MSI-X vector
11386          * only deals with link interrupts, etc., so we add
11387                  * one to the number of vectors we are requesting.
11388                  */
11389                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11390         }
11391
11392         return irq_cnt;
11393 }
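
/* Worked example for the helper above (editor's addition): with
 * rxq_cnt == 4, txq_cnt == 1 and irq_max == 5, max(4, 1) == 4 > 1, so
 * min(4 + 1, 5) == 5 vectors are requested: one for link/misc events
 * plus one per RX ring.
 */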
11394
11395 static bool tg3_enable_msix(struct tg3 *tp)
11396 {
11397         int i, rc;
11398         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11399
11400         tp->txq_cnt = tp->txq_req;
11401         tp->rxq_cnt = tp->rxq_req;
11402         if (!tp->rxq_cnt)
11403                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11404         if (tp->rxq_cnt > tp->rxq_max)
11405                 tp->rxq_cnt = tp->rxq_max;
11406
11407         /* Disable multiple TX rings by default.  Simple round-robin hardware
11408          * scheduling of the TX rings can cause starvation of rings with
11409          * small packets when other rings have TSO or jumbo packets.
11410          */
11411         if (!tp->txq_req)
11412                 tp->txq_cnt = 1;
11413
11414         tp->irq_cnt = tg3_irq_count(tp);
11415
11416         for (i = 0; i < tp->irq_max; i++) {
11417                 msix_ent[i].entry  = i;
11418                 msix_ent[i].vector = 0;
11419         }
11420
11421         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11422         if (rc < 0) {
11423                 return false;
11424         } else if (rc < tp->irq_cnt) {
11425                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11426                               tp->irq_cnt, rc);
11427                 tp->irq_cnt = rc;
11428                 tp->rxq_cnt = max(rc - 1, 1);
11429                 if (tp->txq_cnt)
11430                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11431         }
11432
11433         for (i = 0; i < tp->irq_max; i++)
11434                 tp->napi[i].irq_vec = msix_ent[i].vector;
11435
11436         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11437                 pci_disable_msix(tp->pdev);
11438                 return false;
11439         }
11440
11441         if (tp->irq_cnt == 1)
11442                 return true;
11443
11444         tg3_flag_set(tp, ENABLE_RSS);
11445
11446         if (tp->txq_cnt > 1)
11447                 tg3_flag_set(tp, ENABLE_TSS);
11448
11449         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11450
11451         return true;
11452 }
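
/* Editor's sketch of the pci_enable_msix_range() contract relied on
 * above: it returns the number of vectors actually allocated when that
 * count falls within [minvec, maxvec], or a negative errno.
 * demo_enable_msix is a hypothetical stand-alone caller, not in tg3.c:
 */
static bool demo_enable_msix(struct pci_dev *pdev,
                             struct msix_entry *ent, int want)
{
        int got = pci_enable_msix_range(pdev, ent, 1, want);

        if (got < 0)
                return false;   /* nothing granted; fall back to MSI/INTx */

        /* got may legitimately be smaller than want; the caller must then
         * shrink its ring counts to match, as tg3_enable_msix() does.
         */
        return true;
}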
11453
11454 static void tg3_ints_init(struct tg3 *tp)
11455 {
11456         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11457             !tg3_flag(tp, TAGGED_STATUS)) {
11458                 /* All MSI-supporting chips should support tagged
11459                  * status.  Assert that this is the case.
11460                  */
11461                 netdev_warn(tp->dev,
11462                             "MSI without TAGGED_STATUS? Not using MSI\n");
11463                 goto defcfg;
11464         }
11465
11466         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11467                 tg3_flag_set(tp, USING_MSIX);
11468         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11469                 tg3_flag_set(tp, USING_MSI);
11470
11471         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11472                 u32 msi_mode = tr32(MSGINT_MODE);
11473                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11474                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11475                 if (!tg3_flag(tp, 1SHOT_MSI))
11476                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11477                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11478         }
11479 defcfg:
11480         if (!tg3_flag(tp, USING_MSIX)) {
11481                 tp->irq_cnt = 1;
11482                 tp->napi[0].irq_vec = tp->pdev->irq;
11483         }
11484
11485         if (tp->irq_cnt == 1) {
11486                 tp->txq_cnt = 1;
11487                 tp->rxq_cnt = 1;
11488                 netif_set_real_num_tx_queues(tp->dev, 1);
11489                 netif_set_real_num_rx_queues(tp->dev, 1);
11490         }
11491 }
11492
11493 static void tg3_ints_fini(struct tg3 *tp)
11494 {
11495         if (tg3_flag(tp, USING_MSIX))
11496                 pci_disable_msix(tp->pdev);
11497         else if (tg3_flag(tp, USING_MSI))
11498                 pci_disable_msi(tp->pdev);
11499         tg3_flag_clear(tp, USING_MSI);
11500         tg3_flag_clear(tp, USING_MSIX);
11501         tg3_flag_clear(tp, ENABLE_RSS);
11502         tg3_flag_clear(tp, ENABLE_TSS);
11503 }
11504
11505 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11506                      bool init)
11507 {
11508         struct net_device *dev = tp->dev;
11509         int i, err;
11510
11511         /*
11512          * Setup interrupts first so we know how
11513          * many NAPI resources to allocate
11514          */
11515         tg3_ints_init(tp);
11516
11517         tg3_rss_check_indir_tbl(tp);
11518
11519         /* The placement of this call is tied
11520          * to the setup and use of Host TX descriptors.
11521          */
11522         err = tg3_alloc_consistent(tp);
11523         if (err)
11524                 goto out_ints_fini;
11525
11526         tg3_napi_init(tp);
11527
11528         tg3_napi_enable(tp);
11529
11530         for (i = 0; i < tp->irq_cnt; i++) {
11531                 struct tg3_napi *tnapi = &tp->napi[i];
11532                 err = tg3_request_irq(tp, i);
11533                 if (err) {
11534                         for (i--; i >= 0; i--) {
11535                                 tnapi = &tp->napi[i];
11536                                 free_irq(tnapi->irq_vec, tnapi);
11537                         }
11538                         goto out_napi_fini;
11539                 }
11540         }
11541
11542         tg3_full_lock(tp, 0);
11543
11544         if (init)
11545                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11546
11547         err = tg3_init_hw(tp, reset_phy);
11548         if (err) {
11549                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11550                 tg3_free_rings(tp);
11551         }
11552
11553         tg3_full_unlock(tp);
11554
11555         if (err)
11556                 goto out_free_irq;
11557
11558         if (test_irq && tg3_flag(tp, USING_MSI)) {
11559                 err = tg3_test_msi(tp);
11560
11561                 if (err) {
11562                         tg3_full_lock(tp, 0);
11563                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11564                         tg3_free_rings(tp);
11565                         tg3_full_unlock(tp);
11566
11567                         goto out_napi_fini;
11568                 }
11569
11570                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11571                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11572
11573                         tw32(PCIE_TRANSACTION_CFG,
11574                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11575                 }
11576         }
11577
11578         tg3_phy_start(tp);
11579
11580         tg3_hwmon_open(tp);
11581
11582         tg3_full_lock(tp, 0);
11583
11584         tg3_timer_start(tp);
11585         tg3_flag_set(tp, INIT_COMPLETE);
11586         tg3_enable_ints(tp);
11587
11588         tg3_ptp_resume(tp);
11589
11590         tg3_full_unlock(tp);
11591
11592         netif_tx_start_all_queues(dev);
11593
11594         /*
11595          * Reset the loopback feature if it was turned on while the device
11596          * was down; make sure that it is reinstated properly now.
11597          */
11598         if (dev->features & NETIF_F_LOOPBACK)
11599                 tg3_set_loopback(dev, dev->features);
11600
11601         return 0;
11602
11603 out_free_irq:
11604         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11605                 struct tg3_napi *tnapi = &tp->napi[i];
11606                 free_irq(tnapi->irq_vec, tnapi);
11607         }
11608
11609 out_napi_fini:
11610         tg3_napi_disable(tp);
11611         tg3_napi_fini(tp);
11612         tg3_free_consistent(tp);
11613
11614 out_ints_fini:
11615         tg3_ints_fini(tp);
11616
11617         return err;
11618 }
11619
11620 static void tg3_stop(struct tg3 *tp)
11621 {
11622         int i;
11623
11624         tg3_reset_task_cancel(tp);
11625         tg3_netif_stop(tp);
11626
11627         tg3_timer_stop(tp);
11628
11629         tg3_hwmon_close(tp);
11630
11631         tg3_phy_stop(tp);
11632
11633         tg3_full_lock(tp, 1);
11634
11635         tg3_disable_ints(tp);
11636
11637         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11638         tg3_free_rings(tp);
11639         tg3_flag_clear(tp, INIT_COMPLETE);
11640
11641         tg3_full_unlock(tp);
11642
11643         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11644                 struct tg3_napi *tnapi = &tp->napi[i];
11645                 free_irq(tnapi->irq_vec, tnapi);
11646         }
11647
11648         tg3_ints_fini(tp);
11649
11650         tg3_napi_fini(tp);
11651
11652         tg3_free_consistent(tp);
11653 }
11654
11655 static int tg3_open(struct net_device *dev)
11656 {
11657         struct tg3 *tp = netdev_priv(dev);
11658         int err;
11659
11660         if (tp->pcierr_recovery) {
11661                 netdev_err(dev, "Failed to open device. PCI error recovery "
11662                            "in progress\n");
11663                 return -EAGAIN;
11664         }
11665
11666         if (tp->fw_needed) {
11667                 err = tg3_request_firmware(tp);
11668                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11669                         if (err) {
11670                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11671                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11672                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11673                                 netdev_warn(tp->dev, "EEE capability restored\n");
11674                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11675                         }
11676                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11677                         if (err)
11678                                 return err;
11679                 } else if (err) {
11680                         netdev_warn(tp->dev, "TSO capability disabled\n");
11681                         tg3_flag_clear(tp, TSO_CAPABLE);
11682                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11683                         netdev_notice(tp->dev, "TSO capability restored\n");
11684                         tg3_flag_set(tp, TSO_CAPABLE);
11685                 }
11686         }
11687
11688         tg3_carrier_off(tp);
11689
11690         err = tg3_power_up(tp);
11691         if (err)
11692                 return err;
11693
11694         tg3_full_lock(tp, 0);
11695
11696         tg3_disable_ints(tp);
11697         tg3_flag_clear(tp, INIT_COMPLETE);
11698
11699         tg3_full_unlock(tp);
11700
11701         err = tg3_start(tp,
11702                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11703                         true, true);
11704         if (err) {
11705                 tg3_frob_aux_power(tp, false);
11706                 pci_set_power_state(tp->pdev, PCI_D3hot);
11707         }
11708
11709         return err;
11710 }
11711
11712 static int tg3_close(struct net_device *dev)
11713 {
11714         struct tg3 *tp = netdev_priv(dev);
11715
11716         if (tp->pcierr_recovery) {
11717                 netdev_err(dev, "Failed to close device. PCI error recovery "
11718                            "in progress\n");
11719                 return -EAGAIN;
11720         }
11721
11722         tg3_stop(tp);
11723
11724         /* Clear stats across close / open calls */
11725         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11726         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11727
11728         if (pci_device_is_present(tp->pdev)) {
11729                 tg3_power_down_prepare(tp);
11730
11731                 tg3_carrier_off(tp);
11732         }
11733         return 0;
11734 }
11735
11736 static inline u64 get_stat64(tg3_stat64_t *val)
11737 {
11738         return ((u64)val->high << 32) | ((u64)val->low);
11739 }
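
/* Example (editor's addition): val->high == 0x1 and val->low == 0x2
 * combine to 0x0000000100000002.
 */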
11740
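/* Editor's note: on 5700/5701 copper devices the CRC error count lives in
 * a PHY test register rather than in the MAC statistics block; the
 * hardware counter is (presumably) cleared by the read, which is why the
 * function below accumulates it into tp->phy_crc_errors.
 */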
11741 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11742 {
11743         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11744
11745         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11746             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11747              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11748                 u32 val;
11749
11750                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11751                         tg3_writephy(tp, MII_TG3_TEST1,
11752                                      val | MII_TG3_TEST1_CRC_EN);
11753                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11754                 } else
11755                         val = 0;
11756
11757                 tp->phy_crc_errors += val;
11758
11759                 return tp->phy_crc_errors;
11760         }
11761
11762         return get_stat64(&hw_stats->rx_fcs_errors);
11763 }
11764
11765 #define ESTAT_ADD(member) \
11766         estats->member =        old_estats->member + \
11767                                 get_stat64(&hw_stats->member)
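
/* For example, ESTAT_ADD(rx_octets) expands to:
 *
 *      estats->rx_octets = old_estats->rx_octets +
 *                          get_stat64(&hw_stats->rx_octets);
 */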
11768
11769 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11770 {
11771         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11772         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11773
11774         ESTAT_ADD(rx_octets);
11775         ESTAT_ADD(rx_fragments);
11776         ESTAT_ADD(rx_ucast_packets);
11777         ESTAT_ADD(rx_mcast_packets);
11778         ESTAT_ADD(rx_bcast_packets);
11779         ESTAT_ADD(rx_fcs_errors);
11780         ESTAT_ADD(rx_align_errors);
11781         ESTAT_ADD(rx_xon_pause_rcvd);
11782         ESTAT_ADD(rx_xoff_pause_rcvd);
11783         ESTAT_ADD(rx_mac_ctrl_rcvd);
11784         ESTAT_ADD(rx_xoff_entered);
11785         ESTAT_ADD(rx_frame_too_long_errors);
11786         ESTAT_ADD(rx_jabbers);
11787         ESTAT_ADD(rx_undersize_packets);
11788         ESTAT_ADD(rx_in_length_errors);
11789         ESTAT_ADD(rx_out_length_errors);
11790         ESTAT_ADD(rx_64_or_less_octet_packets);
11791         ESTAT_ADD(rx_65_to_127_octet_packets);
11792         ESTAT_ADD(rx_128_to_255_octet_packets);
11793         ESTAT_ADD(rx_256_to_511_octet_packets);
11794         ESTAT_ADD(rx_512_to_1023_octet_packets);
11795         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11796         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11797         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11798         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11799         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11800
11801         ESTAT_ADD(tx_octets);
11802         ESTAT_ADD(tx_collisions);
11803         ESTAT_ADD(tx_xon_sent);
11804         ESTAT_ADD(tx_xoff_sent);
11805         ESTAT_ADD(tx_flow_control);
11806         ESTAT_ADD(tx_mac_errors);
11807         ESTAT_ADD(tx_single_collisions);
11808         ESTAT_ADD(tx_mult_collisions);
11809         ESTAT_ADD(tx_deferred);
11810         ESTAT_ADD(tx_excessive_collisions);
11811         ESTAT_ADD(tx_late_collisions);
11812         ESTAT_ADD(tx_collide_2times);
11813         ESTAT_ADD(tx_collide_3times);
11814         ESTAT_ADD(tx_collide_4times);
11815         ESTAT_ADD(tx_collide_5times);
11816         ESTAT_ADD(tx_collide_6times);
11817         ESTAT_ADD(tx_collide_7times);
11818         ESTAT_ADD(tx_collide_8times);
11819         ESTAT_ADD(tx_collide_9times);
11820         ESTAT_ADD(tx_collide_10times);
11821         ESTAT_ADD(tx_collide_11times);
11822         ESTAT_ADD(tx_collide_12times);
11823         ESTAT_ADD(tx_collide_13times);
11824         ESTAT_ADD(tx_collide_14times);
11825         ESTAT_ADD(tx_collide_15times);
11826         ESTAT_ADD(tx_ucast_packets);
11827         ESTAT_ADD(tx_mcast_packets);
11828         ESTAT_ADD(tx_bcast_packets);
11829         ESTAT_ADD(tx_carrier_sense_errors);
11830         ESTAT_ADD(tx_discards);
11831         ESTAT_ADD(tx_errors);
11832
11833         ESTAT_ADD(dma_writeq_full);
11834         ESTAT_ADD(dma_write_prioq_full);
11835         ESTAT_ADD(rxbds_empty);
11836         ESTAT_ADD(rx_discards);
11837         ESTAT_ADD(rx_errors);
11838         ESTAT_ADD(rx_threshold_hit);
11839
11840         ESTAT_ADD(dma_readq_full);
11841         ESTAT_ADD(dma_read_prioq_full);
11842         ESTAT_ADD(tx_comp_queue_full);
11843
11844         ESTAT_ADD(ring_set_send_prod_index);
11845         ESTAT_ADD(ring_status_update);
11846         ESTAT_ADD(nic_irqs);
11847         ESTAT_ADD(nic_avoided_irqs);
11848         ESTAT_ADD(nic_tx_threshold_hit);
11849
11850         ESTAT_ADD(mbuf_lwm_thresh_hit);
11851 }
11852
11853 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11854 {
11855         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11856         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11857
11858         stats->rx_packets = old_stats->rx_packets +
11859                 get_stat64(&hw_stats->rx_ucast_packets) +
11860                 get_stat64(&hw_stats->rx_mcast_packets) +
11861                 get_stat64(&hw_stats->rx_bcast_packets);
11862
11863         stats->tx_packets = old_stats->tx_packets +
11864                 get_stat64(&hw_stats->tx_ucast_packets) +
11865                 get_stat64(&hw_stats->tx_mcast_packets) +
11866                 get_stat64(&hw_stats->tx_bcast_packets);
11867
11868         stats->rx_bytes = old_stats->rx_bytes +
11869                 get_stat64(&hw_stats->rx_octets);
11870         stats->tx_bytes = old_stats->tx_bytes +
11871                 get_stat64(&hw_stats->tx_octets);
11872
11873         stats->rx_errors = old_stats->rx_errors +
11874                 get_stat64(&hw_stats->rx_errors);
11875         stats->tx_errors = old_stats->tx_errors +
11876                 get_stat64(&hw_stats->tx_errors) +
11877                 get_stat64(&hw_stats->tx_mac_errors) +
11878                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11879                 get_stat64(&hw_stats->tx_discards);
11880
11881         stats->multicast = old_stats->multicast +
11882                 get_stat64(&hw_stats->rx_mcast_packets);
11883         stats->collisions = old_stats->collisions +
11884                 get_stat64(&hw_stats->tx_collisions);
11885
11886         stats->rx_length_errors = old_stats->rx_length_errors +
11887                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11888                 get_stat64(&hw_stats->rx_undersize_packets);
11889
11890         stats->rx_frame_errors = old_stats->rx_frame_errors +
11891                 get_stat64(&hw_stats->rx_align_errors);
11892         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11893                 get_stat64(&hw_stats->tx_discards);
11894         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11895                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11896
11897         stats->rx_crc_errors = old_stats->rx_crc_errors +
11898                 tg3_calc_crc_errors(tp);
11899
11900         stats->rx_missed_errors = old_stats->rx_missed_errors +
11901                 get_stat64(&hw_stats->rx_discards);
11902
11903         stats->rx_dropped = tp->rx_dropped;
11904         stats->tx_dropped = tp->tx_dropped;
11905 }
11906
11907 static int tg3_get_regs_len(struct net_device *dev)
11908 {
11909         return TG3_REG_BLK_SIZE;
11910 }
11911
11912 static void tg3_get_regs(struct net_device *dev,
11913                 struct ethtool_regs *regs, void *_p)
11914 {
11915         struct tg3 *tp = netdev_priv(dev);
11916
11917         regs->version = 0;
11918
11919         memset(_p, 0, TG3_REG_BLK_SIZE);
11920
11921         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11922                 return;
11923
11924         tg3_full_lock(tp, 0);
11925
11926         tg3_dump_legacy_regs(tp, (u32 *)_p);
11927
11928         tg3_full_unlock(tp);
11929 }
11930
11931 static int tg3_get_eeprom_len(struct net_device *dev)
11932 {
11933         struct tg3 *tp = netdev_priv(dev);
11934
11935         return tp->nvram_size;
11936 }
11937
11938 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11939 {
11940         struct tg3 *tp = netdev_priv(dev);
11941         int ret, cpmu_restore = 0;
11942         u8  *pd;
11943         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11944         __be32 val;
11945
11946         if (tg3_flag(tp, NO_NVRAM))
11947                 return -EINVAL;
11948
11949         offset = eeprom->offset;
11950         len = eeprom->len;
11951         eeprom->len = 0;
11952
11953         eeprom->magic = TG3_EEPROM_MAGIC;
11954
11955         /* Override clock, link aware and link idle modes */
11956         if (tg3_flag(tp, CPMU_PRESENT)) {
11957                 cpmu_val = tr32(TG3_CPMU_CTRL);
11958                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11959                                 CPMU_CTRL_LINK_IDLE_MODE)) {
11960                         tw32(TG3_CPMU_CTRL, cpmu_val &
11961                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
11962                                              CPMU_CTRL_LINK_IDLE_MODE));
11963                         cpmu_restore = 1;
11964                 }
11965         }
11966         tg3_override_clk(tp);
11967
11968         if (offset & 3) {
11969                 /* adjustments to start on required 4 byte boundary */
11970                 b_offset = offset & 3;
11971                 b_count = 4 - b_offset;
11972                 if (b_count > len) {
11973                         /* i.e. offset=1 len=2 */
11974                         b_count = len;
11975                 }
11976                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11977                 if (ret)
11978                         goto eeprom_done;
11979                 memcpy(data, ((char *)&val) + b_offset, b_count);
11980                 len -= b_count;
11981                 offset += b_count;
11982                 eeprom->len += b_count;
11983         }
11984
11985         /* read bytes up to the last 4 byte boundary */
11986         pd = &data[eeprom->len];
11987         for (i = 0; i < (len - (len & 3)); i += 4) {
11988                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11989                 if (ret) {
11990                         if (i)
11991                                 i -= 4;
11992                         eeprom->len += i;
11993                         goto eeprom_done;
11994                 }
11995                 memcpy(pd + i, &val, 4);
11996                 if (need_resched()) {
11997                         if (signal_pending(current)) {
11998                                 eeprom->len += i;
11999                                 ret = -EINTR;
12000                                 goto eeprom_done;
12001                         }
12002                         cond_resched();
12003                 }
12004         }
12005         eeprom->len += i;
12006
12007         if (len & 3) {
12008                 /* read last bytes not ending on 4 byte boundary */
12009                 pd = &data[eeprom->len];
12010                 b_count = len & 3;
12011                 b_offset = offset + len - b_count;
12012                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12013                 if (ret)
12014                         goto eeprom_done;
12015                 memcpy(pd, &val, b_count);
12016                 eeprom->len += b_count;
12017         }
12018         ret = 0;
12019
12020 eeprom_done:
12021         /* Restore clock, link aware and link idle modes */
12022         tg3_restore_clk(tp);
12023         if (cpmu_restore)
12024                 tw32(TG3_CPMU_CTRL, cpmu_val);
12025
12026         return ret;
12027 }
12028
12029 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12030 {
12031         struct tg3 *tp = netdev_priv(dev);
12032         int ret;
12033         u32 offset, len, b_offset, odd_len;
12034         u8 *buf;
12035         __be32 start = 0, end;
12036
12037         if (tg3_flag(tp, NO_NVRAM) ||
12038             eeprom->magic != TG3_EEPROM_MAGIC)
12039                 return -EINVAL;
12040
12041         offset = eeprom->offset;
12042         len = eeprom->len;
12043
12044         if ((b_offset = (offset & 3))) {
12045                 /* adjustments to start on required 4 byte boundary */
12046                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12047                 if (ret)
12048                         return ret;
12049                 len += b_offset;
12050                 offset &= ~3;
12051                 if (len < 4)
12052                         len = 4;
12053         }
12054
12055         odd_len = 0;
12056         if (len & 3) {
12057                 /* adjustments to end on required 4 byte boundary */
12058                 odd_len = 1;
12059                 len = (len + 3) & ~3;
12060                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12061                 if (ret)
12062                         return ret;
12063         }
12064
12065         buf = data;
12066         if (b_offset || odd_len) {
12067                 buf = kmalloc(len, GFP_KERNEL);
12068                 if (!buf)
12069                         return -ENOMEM;
12070                 if (b_offset)
12071                         memcpy(buf, &start, 4);
12072                 if (odd_len)
12073                         memcpy(buf+len-4, &end, 4);
12074                 memcpy(buf + b_offset, data, eeprom->len);
12075         }
12076
12077         ret = tg3_nvram_write_block(tp, offset, len, buf);
12078
12079         if (buf != data)
12080                 kfree(buf);
12081
12082         return ret;
12083 }
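
/* Worked example for the alignment fix-ups above (editor's addition):
 * writing eeprom->len == 3 bytes at offset 0x5 gives b_offset == 1, so
 * the word at 0x4 is first read into 'start', the three new bytes are
 * spliced in at buf + 1, and a single aligned 4-byte block is written
 * back at offset 0x4.
 */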
12084
12085 static int tg3_get_link_ksettings(struct net_device *dev,
12086                                   struct ethtool_link_ksettings *cmd)
12087 {
12088         struct tg3 *tp = netdev_priv(dev);
12089         u32 supported, advertising;
12090
12091         if (tg3_flag(tp, USE_PHYLIB)) {
12092                 struct phy_device *phydev;
12093                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12094                         return -EAGAIN;
12095                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12096                 return phy_ethtool_ksettings_get(phydev, cmd);
12097         }
12098
12099         supported = (SUPPORTED_Autoneg);
12100
12101         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12102                 supported |= (SUPPORTED_1000baseT_Half |
12103                               SUPPORTED_1000baseT_Full);
12104
12105         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12106                 supported |= (SUPPORTED_100baseT_Half |
12107                               SUPPORTED_100baseT_Full |
12108                               SUPPORTED_10baseT_Half |
12109                               SUPPORTED_10baseT_Full |
12110                               SUPPORTED_TP);
12111                 cmd->base.port = PORT_TP;
12112         } else {
12113                 supported |= SUPPORTED_FIBRE;
12114                 cmd->base.port = PORT_FIBRE;
12115         }
12116         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12117                                                 supported);
12118
12119         advertising = tp->link_config.advertising;
12120         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12121                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12122                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12123                                 advertising |= ADVERTISED_Pause;
12124                         } else {
12125                                 advertising |= ADVERTISED_Pause |
12126                                         ADVERTISED_Asym_Pause;
12127                         }
12128                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12129                         advertising |= ADVERTISED_Asym_Pause;
12130                 }
12131         }
12132         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12133                                                 advertising);
12134
12135         if (netif_running(dev) && tp->link_up) {
12136                 cmd->base.speed = tp->link_config.active_speed;
12137                 cmd->base.duplex = tp->link_config.active_duplex;
12138                 ethtool_convert_legacy_u32_to_link_mode(
12139                         cmd->link_modes.lp_advertising,
12140                         tp->link_config.rmt_adv);
12141
12142                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12143                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12144                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12145                         else
12146                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12147                 }
12148         } else {
12149                 cmd->base.speed = SPEED_UNKNOWN;
12150                 cmd->base.duplex = DUPLEX_UNKNOWN;
12151                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12152         }
12153         cmd->base.phy_address = tp->phy_addr;
12154         cmd->base.autoneg = tp->link_config.autoneg;
12155         return 0;
12156 }
12157
12158 static int tg3_set_link_ksettings(struct net_device *dev,
12159                                   const struct ethtool_link_ksettings *cmd)
12160 {
12161         struct tg3 *tp = netdev_priv(dev);
12162         u32 speed = cmd->base.speed;
12163         u32 advertising;
12164
12165         if (tg3_flag(tp, USE_PHYLIB)) {
12166                 struct phy_device *phydev;
12167                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12168                         return -EAGAIN;
12169                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12170                 return phy_ethtool_ksettings_set(phydev, cmd);
12171         }
12172
12173         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12174             cmd->base.autoneg != AUTONEG_DISABLE)
12175                 return -EINVAL;
12176
12177         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12178             cmd->base.duplex != DUPLEX_FULL &&
12179             cmd->base.duplex != DUPLEX_HALF)
12180                 return -EINVAL;
12181
12182         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12183                                                 cmd->link_modes.advertising);
12184
12185         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12186                 u32 mask = ADVERTISED_Autoneg |
12187                            ADVERTISED_Pause |
12188                            ADVERTISED_Asym_Pause;
12189
12190                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12191                         mask |= ADVERTISED_1000baseT_Half |
12192                                 ADVERTISED_1000baseT_Full;
12193
12194                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12195                         mask |= ADVERTISED_100baseT_Half |
12196                                 ADVERTISED_100baseT_Full |
12197                                 ADVERTISED_10baseT_Half |
12198                                 ADVERTISED_10baseT_Full |
12199                                 ADVERTISED_TP;
12200                 else
12201                         mask |= ADVERTISED_FIBRE;
12202
12203                 if (advertising & ~mask)
12204                         return -EINVAL;
12205
12206                 mask &= (ADVERTISED_1000baseT_Half |
12207                          ADVERTISED_1000baseT_Full |
12208                          ADVERTISED_100baseT_Half |
12209                          ADVERTISED_100baseT_Full |
12210                          ADVERTISED_10baseT_Half |
12211                          ADVERTISED_10baseT_Full);
12212
12213                 advertising &= mask;
12214         } else {
12215                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12216                         if (speed != SPEED_1000)
12217                                 return -EINVAL;
12218
12219                         if (cmd->base.duplex != DUPLEX_FULL)
12220                                 return -EINVAL;
12221                 } else {
12222                         if (speed != SPEED_100 &&
12223                             speed != SPEED_10)
12224                                 return -EINVAL;
12225                 }
12226         }
12227
12228         tg3_full_lock(tp, 0);
12229
12230         tp->link_config.autoneg = cmd->base.autoneg;
12231         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12232                 tp->link_config.advertising = (advertising |
12233                                               ADVERTISED_Autoneg);
12234                 tp->link_config.speed = SPEED_UNKNOWN;
12235                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12236         } else {
12237                 tp->link_config.advertising = 0;
12238                 tp->link_config.speed = speed;
12239                 tp->link_config.duplex = cmd->base.duplex;
12240         }
12241
12242         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12243
12244         tg3_warn_mgmt_link_flap(tp);
12245
12246         if (netif_running(dev))
12247                 tg3_setup_phy(tp, true);
12248
12249         tg3_full_unlock(tp);
12250
12251         return 0;
12252 }
12253
12254 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12255 {
12256         struct tg3 *tp = netdev_priv(dev);
12257
12258         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12259         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12260         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12261         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12262 }
12263
12264 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12265 {
12266         struct tg3 *tp = netdev_priv(dev);
12267
12268         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12269                 wol->supported = WAKE_MAGIC;
12270         else
12271                 wol->supported = 0;
12272         wol->wolopts = 0;
12273         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12274                 wol->wolopts = WAKE_MAGIC;
12275         memset(&wol->sopass, 0, sizeof(wol->sopass));
12276 }
12277
12278 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12279 {
12280         struct tg3 *tp = netdev_priv(dev);
12281         struct device *dp = &tp->pdev->dev;
12282
12283         if (wol->wolopts & ~WAKE_MAGIC)
12284                 return -EINVAL;
12285         if ((wol->wolopts & WAKE_MAGIC) &&
12286             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12287                 return -EINVAL;
12288
12289         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12290
12291         if (device_may_wakeup(dp))
12292                 tg3_flag_set(tp, WOL_ENABLE);
12293         else
12294                 tg3_flag_clear(tp, WOL_ENABLE);
12295
12296         return 0;
12297 }
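
/* Usage note (editor's addition): this path is reached from userspace
 * via "ethtool -s <dev> wol g" (enable magic-packet wake) or
 * "ethtool -s <dev> wol d" (disable); any other wake option is rejected
 * with -EINVAL above.
 */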
12298
12299 static u32 tg3_get_msglevel(struct net_device *dev)
12300 {
12301         struct tg3 *tp = netdev_priv(dev);
12302         return tp->msg_enable;
12303 }
12304
12305 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12306 {
12307         struct tg3 *tp = netdev_priv(dev);
12308         tp->msg_enable = value;
12309 }
12310
12311 static int tg3_nway_reset(struct net_device *dev)
12312 {
12313         struct tg3 *tp = netdev_priv(dev);
12314         int r;
12315
12316         if (!netif_running(dev))
12317                 return -EAGAIN;
12318
12319         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12320                 return -EINVAL;
12321
12322         tg3_warn_mgmt_link_flap(tp);
12323
12324         if (tg3_flag(tp, USE_PHYLIB)) {
12325                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12326                         return -EAGAIN;
12327                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12328         } else {
12329                 u32 bmcr;
12330
12331                 spin_lock_bh(&tp->lock);
12332                 r = -EINVAL;
12333                 tg3_readphy(tp, MII_BMCR, &bmcr);
12334                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12335                     ((bmcr & BMCR_ANENABLE) ||
12336                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12337                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12338                                                    BMCR_ANENABLE);
12339                         r = 0;
12340                 }
12341                 spin_unlock_bh(&tp->lock);
12342         }
12343
12344         return r;
12345 }
12346
12347 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12348 {
12349         struct tg3 *tp = netdev_priv(dev);
12350
12351         ering->rx_max_pending = tp->rx_std_ring_mask;
12352         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12353                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12354         else
12355                 ering->rx_jumbo_max_pending = 0;
12356
12357         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12358
12359         ering->rx_pending = tp->rx_pending;
12360         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12361                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12362         else
12363                 ering->rx_jumbo_pending = 0;
12364
12365         ering->tx_pending = tp->napi[0].tx_pending;
12366 }
12367
12368 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12369 {
12370         struct tg3 *tp = netdev_priv(dev);
12371         int i, irq_sync = 0, err = 0;
12372
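        /* Editor's note: the TX bound below exists because a maximally
         * fragmented skb consumes one descriptor per fragment plus one
         * for the linear part, so the ring must hold more than
         * MAX_SKB_FRAGS entries; the 3x margin on TSO_BUG parts is
         * (presumably) headroom for the driver-level segmentation that
         * workaround can trigger.
         */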
12373         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12374             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12375             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12376             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12377             (tg3_flag(tp, TSO_BUG) &&
12378              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12379                 return -EINVAL;
12380
12381         if (netif_running(dev)) {
12382                 tg3_phy_stop(tp);
12383                 tg3_netif_stop(tp);
12384                 irq_sync = 1;
12385         }
12386
12387         tg3_full_lock(tp, irq_sync);
12388
12389         tp->rx_pending = ering->rx_pending;
12390
12391         if (tg3_flag(tp, MAX_RXPEND_64) &&
12392             tp->rx_pending > 63)
12393                 tp->rx_pending = 63;
12394
12395         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12396                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12397
12398         for (i = 0; i < tp->irq_max; i++)
12399                 tp->napi[i].tx_pending = ering->tx_pending;
12400
12401         if (netif_running(dev)) {
12402                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12403                 err = tg3_restart_hw(tp, false);
12404                 if (!err)
12405                         tg3_netif_start(tp);
12406         }
12407
12408         tg3_full_unlock(tp);
12409
12410         if (irq_sync && !err)
12411                 tg3_phy_start(tp);
12412
12413         return err;
12414 }
12415
12416 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12417 {
12418         struct tg3 *tp = netdev_priv(dev);
12419
12420         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12421
12422         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12423                 epause->rx_pause = 1;
12424         else
12425                 epause->rx_pause = 0;
12426
12427         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12428                 epause->tx_pause = 1;
12429         else
12430                 epause->tx_pause = 0;
12431 }
12432
12433 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12434 {
12435         struct tg3 *tp = netdev_priv(dev);
12436         int err = 0;
12437
12438         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12439                 tg3_warn_mgmt_link_flap(tp);
12440
12441         if (tg3_flag(tp, USE_PHYLIB)) {
12442                 u32 newadv;
12443                 struct phy_device *phydev;
12444
12445                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12446
12447                 if (!(phydev->supported & SUPPORTED_Pause) ||
12448                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12449                      (epause->rx_pause != epause->tx_pause)))
12450                         return -EINVAL;
12451
12452                 tp->link_config.flowctrl = 0;
12453                 if (epause->rx_pause) {
12454                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12455
12456                         if (epause->tx_pause) {
12457                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12458                                 newadv = ADVERTISED_Pause;
12459                         } else
12460                                 newadv = ADVERTISED_Pause |
12461                                          ADVERTISED_Asym_Pause;
12462                 } else if (epause->tx_pause) {
12463                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12464                         newadv = ADVERTISED_Asym_Pause;
12465                 } else
12466                         newadv = 0;
12467
12468                 if (epause->autoneg)
12469                         tg3_flag_set(tp, PAUSE_AUTONEG);
12470                 else
12471                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12472
12473                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12474                         u32 oldadv = phydev->advertising &
12475                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12476                         if (oldadv != newadv) {
12477                                 phydev->advertising &=
12478                                         ~(ADVERTISED_Pause |
12479                                           ADVERTISED_Asym_Pause);
12480                                 phydev->advertising |= newadv;
12481                                 if (phydev->autoneg) {
12482                                         /*
12483                                          * Always renegotiate the link to
12484                                          * inform our link partner of our
12485                                          * flow control settings, even if the
12486                                          * flow control is forced.  Let
12487                                          * tg3_adjust_link() do the final
12488                                          * flow control setup.
12489                                          */
12490                                         return phy_start_aneg(phydev);
12491                                 }
12492                         }
12493
12494                         if (!epause->autoneg)
12495                                 tg3_setup_flow_control(tp, 0, 0);
12496                 } else {
12497                         tp->link_config.advertising &=
12498                                         ~(ADVERTISED_Pause |
12499                                           ADVERTISED_Asym_Pause);
12500                         tp->link_config.advertising |= newadv;
12501                 }
12502         } else {
12503                 int irq_sync = 0;
12504
12505                 if (netif_running(dev)) {
12506                         tg3_netif_stop(tp);
12507                         irq_sync = 1;
12508                 }
12509
12510                 tg3_full_lock(tp, irq_sync);
12511
12512                 if (epause->autoneg)
12513                         tg3_flag_set(tp, PAUSE_AUTONEG);
12514                 else
12515                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12516                 if (epause->rx_pause)
12517                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12518                 else
12519                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12520                 if (epause->tx_pause)
12521                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12522                 else
12523                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12524
12525                 if (netif_running(dev)) {
12526                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12527                         err = tg3_restart_hw(tp, false);
12528                         if (!err)
12529                                 tg3_netif_start(tp);
12530                 }
12531
12532                 tg3_full_unlock(tp);
12533         }
12534
12535         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12536
12537         return err;
12538 }
12539
12540 static int tg3_get_sset_count(struct net_device *dev, int sset)
12541 {
12542         switch (sset) {
12543         case ETH_SS_TEST:
12544                 return TG3_NUM_TEST;
12545         case ETH_SS_STATS:
12546                 return TG3_NUM_STATS;
12547         default:
12548                 return -EOPNOTSUPP;
12549         }
12550 }
12551
12552 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12553                          u32 *rules __always_unused)
12554 {
12555         struct tg3 *tp = netdev_priv(dev);
12556
12557         if (!tg3_flag(tp, SUPPORT_MSIX))
12558                 return -EOPNOTSUPP;
12559
12560         switch (info->cmd) {
12561         case ETHTOOL_GRXRINGS:
12562                 if (netif_running(tp->dev))
12563                         info->data = tp->rxq_cnt;
12564                 else {
12565                         info->data = num_online_cpus();
12566                         if (info->data > TG3_RSS_MAX_NUM_QS)
12567                                 info->data = TG3_RSS_MAX_NUM_QS;
12568                 }
12569
12570                 return 0;
12571
12572         default:
12573                 return -EOPNOTSUPP;
12574         }
12575 }
12576
12577 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12578 {
12579         u32 size = 0;
12580         struct tg3 *tp = netdev_priv(dev);
12581
12582         if (tg3_flag(tp, SUPPORT_MSIX))
12583                 size = TG3_RSS_INDIR_TBL_SIZE;
12584
12585         return size;
12586 }
12587
12588 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12589 {
12590         struct tg3 *tp = netdev_priv(dev);
12591         int i;
12592
12593         if (hfunc)
12594                 *hfunc = ETH_RSS_HASH_TOP;
12595         if (!indir)
12596                 return 0;
12597
12598         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12599                 indir[i] = tp->rss_ind_tbl[i];
12600
12601         return 0;
12602 }
12603
12604 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12605                         const u8 hfunc)
12606 {
12607         struct tg3 *tp = netdev_priv(dev);
12608         size_t i;
12609
12610         /* We require at least one supported parameter to be changed and no
12611          * change in any of the unsupported parameters.
12612          */
12613         if (key ||
12614             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12615                 return -EOPNOTSUPP;
12616
12617         if (!indir)
12618                 return 0;
12619
12620         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12621                 tp->rss_ind_tbl[i] = indir[i];
12622
12623         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12624                 return 0;
12625
12626         /* It is legal to write the indirection
12627          * table while the device is running.
12628          */
12629         tg3_full_lock(tp, 0);
12630         tg3_rss_write_indir_tbl(tp);
12631         tg3_full_unlock(tp);
12632
12633         return 0;
12634 }
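
/* Editor's note: each indirection-table entry maps the low bits of the
 * RSS hash to an RX queue, so, for example,
 *
 *      indir[i] = i % tp->rxq_cnt;
 *
 * spreads flows round-robin across the enabled RX rings (the driver's
 * default table is built the same way).
 */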
12635
12636 static void tg3_get_channels(struct net_device *dev,
12637                              struct ethtool_channels *channel)
12638 {
12639         struct tg3 *tp = netdev_priv(dev);
12640         u32 deflt_qs = netif_get_num_default_rss_queues();
12641
12642         channel->max_rx = tp->rxq_max;
12643         channel->max_tx = tp->txq_max;
12644
12645         if (netif_running(dev)) {
12646                 channel->rx_count = tp->rxq_cnt;
12647                 channel->tx_count = tp->txq_cnt;
12648         } else {
12649                 if (tp->rxq_req)
12650                         channel->rx_count = tp->rxq_req;
12651                 else
12652                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12653
12654                 if (tp->txq_req)
12655                         channel->tx_count = tp->txq_req;
12656                 else
12657                         channel->tx_count = min(deflt_qs, tp->txq_max);
12658         }
12659 }
12660
12661 static int tg3_set_channels(struct net_device *dev,
12662                             struct ethtool_channels *channel)
12663 {
12664         struct tg3 *tp = netdev_priv(dev);
12665
12666         if (!tg3_flag(tp, SUPPORT_MSIX))
12667                 return -EOPNOTSUPP;
12668
12669         if (channel->rx_count > tp->rxq_max ||
12670             channel->tx_count > tp->txq_max)
12671                 return -EINVAL;
12672
12673         tp->rxq_req = channel->rx_count;
12674         tp->txq_req = channel->tx_count;
12675
12676         if (!netif_running(dev))
12677                 return 0;
12678
12679         tg3_stop(tp);
12680
12681         tg3_carrier_off(tp);
12682
12683         tg3_start(tp, true, false, false);
12684
12685         return 0;
12686 }
12687
12688 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12689 {
12690         switch (stringset) {
12691         case ETH_SS_STATS:
12692                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12693                 break;
12694         case ETH_SS_TEST:
12695                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12696                 break;
12697         default:
12698                 WARN_ON(1);     /* we need a WARN() */
12699                 break;
12700         }
12701 }
12702
12703 static int tg3_set_phys_id(struct net_device *dev,
12704                             enum ethtool_phys_id_state state)
12705 {
12706         struct tg3 *tp = netdev_priv(dev);
12707
12708         if (!netif_running(tp->dev))
12709                 return -EAGAIN;
12710
12711         switch (state) {
12712         case ETHTOOL_ID_ACTIVE:
12713                 return 1;       /* cycle on/off once per second */
12714
12715         case ETHTOOL_ID_ON:
12716                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12717                      LED_CTRL_1000MBPS_ON |
12718                      LED_CTRL_100MBPS_ON |
12719                      LED_CTRL_10MBPS_ON |
12720                      LED_CTRL_TRAFFIC_OVERRIDE |
12721                      LED_CTRL_TRAFFIC_BLINK |
12722                      LED_CTRL_TRAFFIC_LED);
12723                 break;
12724
12725         case ETHTOOL_ID_OFF:
12726                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12727                      LED_CTRL_TRAFFIC_OVERRIDE);
12728                 break;
12729
12730         case ETHTOOL_ID_INACTIVE:
12731                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12732                 break;
12733         }
12734
12735         return 0;
12736 }
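
/* Usage note (editor's addition): "ethtool -p <dev> [seconds]" invokes
 * this callback; per the comment above, returning 1 from
 * ETHTOOL_ID_ACTIVE asks the ethtool core to alternate ETHTOOL_ID_ON /
 * ETHTOOL_ID_OFF once per second for the duration.
 */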
12737
12738 static void tg3_get_ethtool_stats(struct net_device *dev,
12739                                    struct ethtool_stats *estats, u64 *tmp_stats)
12740 {
12741         struct tg3 *tp = netdev_priv(dev);
12742
12743         if (tp->hw_stats)
12744                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12745         else
12746                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12747 }
12748
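/* Read the adapter's VPD block.  On EEPROM-style parts the NVRAM directory
 * is scanned for an extended-VPD entry first, with the fixed
 * TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN window as the fallback; all other parts
 * are read through the PCI VPD capability.  Returns a kmalloc()ed buffer
 * the caller must free, with its length in *vpdlen, or NULL on failure.
 */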
12749 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12750 {
12751         int i;
12752         __be32 *buf;
12753         u32 offset = 0, len = 0;
12754         u32 magic, val;
12755
12756         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12757                 return NULL;
12758
12759         if (magic == TG3_EEPROM_MAGIC) {
12760                 for (offset = TG3_NVM_DIR_START;
12761                      offset < TG3_NVM_DIR_END;
12762                      offset += TG3_NVM_DIRENT_SIZE) {
12763                         if (tg3_nvram_read(tp, offset, &val))
12764                                 return NULL;
12765
12766                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12767                             TG3_NVM_DIRTYPE_EXTVPD)
12768                                 break;
12769                 }
12770
12771                 if (offset != TG3_NVM_DIR_END) {
12772                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12773                         if (tg3_nvram_read(tp, offset + 4, &offset))
12774                                 return NULL;
12775
12776                         offset = tg3_nvram_logical_addr(tp, offset);
12777                 }
12778         }
12779
12780         if (!offset || !len) {
12781                 offset = TG3_NVM_VPD_OFF;
12782                 len = TG3_NVM_VPD_LEN;
12783         }
12784
12785         buf = kmalloc(len, GFP_KERNEL);
12786         if (buf == NULL)
12787                 return NULL;
12788
12789         if (magic == TG3_EEPROM_MAGIC) {
12790                 for (i = 0; i < len; i += 4) {
12791                         /* The data is in little-endian format in NVRAM.
12792                          * Use the big-endian read routines to preserve
12793                          * the byte order as it exists in NVRAM.
12794                          */
12795                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12796                                 goto error;
12797                 }
12798         } else {
12799                 u8 *ptr;
12800                 ssize_t cnt;
12801                 unsigned int pos = 0;
12802
12803                 ptr = (u8 *)&buf[0];
12804                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12805                         cnt = pci_read_vpd(tp->pdev, pos,
12806                                            len - pos, ptr);
12807                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12808                                 cnt = 0;
12809                         else if (cnt < 0)
12810                                 goto error;
12811                 }
12812                 if (pos != len)
12813                         goto error;
12814         }
12815
12816         *vpdlen = len;
12817
12818         return buf;
12819
12820 error:
12821         kfree(buf);
12822         return NULL;
12823 }
12824
12825 #define NVRAM_TEST_SIZE 0x100
12826 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12827 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12828 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12829 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12830 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12831 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12832 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12833 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12834
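/* NVRAM self-test: size the image from its magic number, read it out, and
 * verify the format-specific integrity data -- a zero byte-sum for selfboot
 * format 1 (skipping the MBA word on rev 2), per-byte odd parity for the
 * hardware selfboot format, the two CRCs below for legacy images, and
 * finally the VPD read-only "RV" checksum keyword.
 */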
12835 static int tg3_test_nvram(struct tg3 *tp)
12836 {
12837         u32 csum, magic, len;
12838         __be32 *buf;
12839         int i, j, k, err = 0, size;
12840
12841         if (tg3_flag(tp, NO_NVRAM))
12842                 return 0;
12843
12844         if (tg3_nvram_read(tp, 0, &magic) != 0)
12845                 return -EIO;
12846
12847         if (magic == TG3_EEPROM_MAGIC)
12848                 size = NVRAM_TEST_SIZE;
12849         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12850                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12851                     TG3_EEPROM_SB_FORMAT_1) {
12852                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12853                         case TG3_EEPROM_SB_REVISION_0:
12854                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12855                                 break;
12856                         case TG3_EEPROM_SB_REVISION_2:
12857                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12858                                 break;
12859                         case TG3_EEPROM_SB_REVISION_3:
12860                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12861                                 break;
12862                         case TG3_EEPROM_SB_REVISION_4:
12863                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12864                                 break;
12865                         case TG3_EEPROM_SB_REVISION_5:
12866                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12867                                 break;
12868                         case TG3_EEPROM_SB_REVISION_6:
12869                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12870                                 break;
12871                         default:
12872                                 return -EIO;
12873                         }
12874                 } else
12875                         return 0;
12876         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12877                 size = NVRAM_SELFBOOT_HW_SIZE;
12878         else
12879                 return -EIO;
12880
12881         buf = kmalloc(size, GFP_KERNEL);
12882         if (buf == NULL)
12883                 return -ENOMEM;
12884
12885         err = -EIO;
12886         for (i = 0, j = 0; i < size; i += 4, j++) {
12887                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12888                 if (err)
12889                         break;
12890         }
12891         if (i < size)
12892                 goto out;
12893
12894         /* Selfboot format */
12895         magic = be32_to_cpu(buf[0]);
12896         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12897             TG3_EEPROM_MAGIC_FW) {
12898                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12899
12900                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12901                     TG3_EEPROM_SB_REVISION_2) {
12902                         /* For rev 2, the csum doesn't include the MBA. */
12903                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12904                                 csum8 += buf8[i];
12905                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12906                                 csum8 += buf8[i];
12907                 } else {
12908                         for (i = 0; i < size; i++)
12909                                 csum8 += buf8[i];
12910                 }
12911
12912                 if (csum8 == 0) {
12913                         err = 0;
12914                         goto out;
12915                 }
12916
12917                 err = -EIO;
12918                 goto out;
12919         }
12920
12921         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12922             TG3_EEPROM_MAGIC_HW) {
12923                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12924                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12925                 u8 *buf8 = (u8 *) buf;
12926
12927                 /* Separate the parity bits and the data bytes.  */
12928                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12929                         if ((i == 0) || (i == 8)) {
12930                                 int l;
12931                                 u8 msk;
12932
12933                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12934                                         parity[k++] = buf8[i] & msk;
12935                                 i++;
12936                         } else if (i == 16) {
12937                                 int l;
12938                                 u8 msk;
12939
12940                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12941                                         parity[k++] = buf8[i] & msk;
12942                                 i++;
12943
12944                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12945                                         parity[k++] = buf8[i] & msk;
12946                                 i++;
12947                         }
12948                         data[j++] = buf8[i];
12949                 }
12950
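                /* Verify per-byte odd parity: each data byte plus its
                 * stored parity bit must contain an odd number of set
                 * bits, so odd-weight bytes need a clear parity bit and
                 * even-weight bytes need a set one.
                 */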
12951                 err = -EIO;
12952                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12953                         u8 hw8 = hweight8(data[i]);
12954
12955                         if ((hw8 & 0x1) && parity[i])
12956                                 goto out;
12957                         else if (!(hw8 & 0x1) && !parity[i])
12958                                 goto out;
12959                 }
12960                 err = 0;
12961                 goto out;
12962         }
12963
12964         err = -EIO;
12965
12966         /* Bootstrap checksum at offset 0x10 */
12967         csum = calc_crc((unsigned char *) buf, 0x10);
12968         if (csum != le32_to_cpu(buf[0x10/4]))
12969                 goto out;
12970
12971         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12972         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12973         if (csum != le32_to_cpu(buf[0xfc/4]))
12974                 goto out;
12975
12976         kfree(buf);
12977
12978         buf = tg3_vpd_readblock(tp, &len);
12979         if (!buf)
12980                 return -ENOMEM;
12981
12982         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12983         if (i > 0) {
12984                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12985                 if (j < 0)
12986                         goto out;
12987
12988                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12989                         goto out;
12990
12991                 i += PCI_VPD_LRDT_TAG_SIZE;
12992                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12993                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12994                 if (j > 0) {
12995                         u8 csum8 = 0;
12996
12997                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12998
12999                         for (i = 0; i <= j; i++)
13000                                 csum8 += ((u8 *)buf)[i];
13001
13002                         if (csum8)
13003                                 goto out;
13004                 }
13005         }
13006
13007         err = 0;
13008
13009 out:
13010         kfree(buf);
13011         return err;
13012 }
13013
13014 #define TG3_SERDES_TIMEOUT_SEC  2
13015 #define TG3_COPPER_TIMEOUT_SEC  6
13016
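/* Link self-test: poll tp->link_up once per second, allowing SerDes parts
 * 2 seconds and copper parts 6 seconds to (re)establish link before
 * reporting failure.
 */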
13017 static int tg3_test_link(struct tg3 *tp)
13018 {
13019         int i, max;
13020
13021         if (!netif_running(tp->dev))
13022                 return -ENODEV;
13023
13024         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13025                 max = TG3_SERDES_TIMEOUT_SEC;
13026         else
13027                 max = TG3_COPPER_TIMEOUT_SEC;
13028
13029         for (i = 0; i < max; i++) {
13030                 if (tp->link_up)
13031                         return 0;
13032
13033                 if (msleep_interruptible(1000))
13034                         break;
13035         }
13036
13037         return -EIO;
13038 }
13039
13040 /* Only test the commonly used registers */
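/* Each table entry pairs a read_mask (read-only bits that must keep their
 * value across writes) with a write_mask (read/write bits that must accept
 * both all-zeros and all-ones).  The flags field gates entries to the
 * 5705/5750 register layouts and skips known 5788 differences.
 */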
13041 static int tg3_test_registers(struct tg3 *tp)
13042 {
13043         int i, is_5705, is_5750;
13044         u32 offset, read_mask, write_mask, val, save_val, read_val;
13045         static struct {
13046                 u16 offset;
13047                 u16 flags;
13048 #define TG3_FL_5705     0x1
13049 #define TG3_FL_NOT_5705 0x2
13050 #define TG3_FL_NOT_5788 0x4
13051 #define TG3_FL_NOT_5750 0x8
13052                 u32 read_mask;
13053                 u32 write_mask;
13054         } reg_tbl[] = {
13055                 /* MAC Control Registers */
13056                 { MAC_MODE, TG3_FL_NOT_5705,
13057                         0x00000000, 0x00ef6f8c },
13058                 { MAC_MODE, TG3_FL_5705,
13059                         0x00000000, 0x01ef6b8c },
13060                 { MAC_STATUS, TG3_FL_NOT_5705,
13061                         0x03800107, 0x00000000 },
13062                 { MAC_STATUS, TG3_FL_5705,
13063                         0x03800100, 0x00000000 },
13064                 { MAC_ADDR_0_HIGH, 0x0000,
13065                         0x00000000, 0x0000ffff },
13066                 { MAC_ADDR_0_LOW, 0x0000,
13067                         0x00000000, 0xffffffff },
13068                 { MAC_RX_MTU_SIZE, 0x0000,
13069                         0x00000000, 0x0000ffff },
13070                 { MAC_TX_MODE, 0x0000,
13071                         0x00000000, 0x00000070 },
13072                 { MAC_TX_LENGTHS, 0x0000,
13073                         0x00000000, 0x00003fff },
13074                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13075                         0x00000000, 0x000007fc },
13076                 { MAC_RX_MODE, TG3_FL_5705,
13077                         0x00000000, 0x000007dc },
13078                 { MAC_HASH_REG_0, 0x0000,
13079                         0x00000000, 0xffffffff },
13080                 { MAC_HASH_REG_1, 0x0000,
13081                         0x00000000, 0xffffffff },
13082                 { MAC_HASH_REG_2, 0x0000,
13083                         0x00000000, 0xffffffff },
13084                 { MAC_HASH_REG_3, 0x0000,
13085                         0x00000000, 0xffffffff },
13086
13087                 /* Receive Data and Receive BD Initiator Control Registers. */
13088                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13089                         0x00000000, 0xffffffff },
13090                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13091                         0x00000000, 0xffffffff },
13092                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13093                         0x00000000, 0x00000003 },
13094                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13095                         0x00000000, 0xffffffff },
13096                 { RCVDBDI_STD_BD+0, 0x0000,
13097                         0x00000000, 0xffffffff },
13098                 { RCVDBDI_STD_BD+4, 0x0000,
13099                         0x00000000, 0xffffffff },
13100                 { RCVDBDI_STD_BD+8, 0x0000,
13101                         0x00000000, 0xffff0002 },
13102                 { RCVDBDI_STD_BD+0xc, 0x0000,
13103                         0x00000000, 0xffffffff },
13104
13105                 /* Receive BD Initiator Control Registers. */
13106                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13107                         0x00000000, 0xffffffff },
13108                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13109                         0x00000000, 0x000003ff },
13110                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13111                         0x00000000, 0xffffffff },
13112
13113                 /* Host Coalescing Control Registers. */
13114                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13115                         0x00000000, 0x00000004 },
13116                 { HOSTCC_MODE, TG3_FL_5705,
13117                         0x00000000, 0x000000f6 },
13118                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13119                         0x00000000, 0xffffffff },
13120                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13121                         0x00000000, 0x000003ff },
13122                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13123                         0x00000000, 0xffffffff },
13124                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13125                         0x00000000, 0x000003ff },
13126                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13127                         0x00000000, 0xffffffff },
13128                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13129                         0x00000000, 0x000000ff },
13130                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13131                         0x00000000, 0xffffffff },
13132                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13133                         0x00000000, 0x000000ff },
13134                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13135                         0x00000000, 0xffffffff },
13136                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13137                         0x00000000, 0xffffffff },
13138                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13139                         0x00000000, 0xffffffff },
13140                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13141                         0x00000000, 0x000000ff },
13142                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13143                         0x00000000, 0xffffffff },
13144                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13145                         0x00000000, 0x000000ff },
13146                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13147                         0x00000000, 0xffffffff },
13148                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13149                         0x00000000, 0xffffffff },
13150                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13151                         0x00000000, 0xffffffff },
13152                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13153                         0x00000000, 0xffffffff },
13154                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13155                         0x00000000, 0xffffffff },
13156                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13157                         0xffffffff, 0x00000000 },
13158                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13159                         0xffffffff, 0x00000000 },
13160
13161                 /* Buffer Manager Control Registers. */
13162                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13163                         0x00000000, 0x007fff80 },
13164                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13165                         0x00000000, 0x007fffff },
13166                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13167                         0x00000000, 0x0000003f },
13168                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13169                         0x00000000, 0x000001ff },
13170                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13171                         0x00000000, 0x000001ff },
13172                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13173                         0xffffffff, 0x00000000 },
13174                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13175                         0xffffffff, 0x00000000 },
13176
13177                 /* Mailbox Registers */
13178                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13179                         0x00000000, 0x000001ff },
13180                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13181                         0x00000000, 0x000001ff },
13182                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13183                         0x00000000, 0x000007ff },
13184                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13185                         0x00000000, 0x000001ff },
13186
13187                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13188         };
13189
13190         is_5705 = is_5750 = 0;
13191         if (tg3_flag(tp, 5705_PLUS)) {
13192                 is_5705 = 1;
13193                 if (tg3_flag(tp, 5750_PLUS))
13194                         is_5750 = 1;
13195         }
13196
13197         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13198                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13199                         continue;
13200
13201                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13202                         continue;
13203
13204                 if (tg3_flag(tp, IS_5788) &&
13205                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13206                         continue;
13207
13208                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13209                         continue;
13210
13211                 offset = (u32) reg_tbl[i].offset;
13212                 read_mask = reg_tbl[i].read_mask;
13213                 write_mask = reg_tbl[i].write_mask;
13214
13215                 /* Save the original register content */
13216                 save_val = tr32(offset);
13217
13218                 /* Determine the read-only value. */
13219                 read_val = save_val & read_mask;
13220
13221                 /* Write zero to the register, then make sure the read-only bits
13222                  * are not changed and the read/write bits are all zeros.
13223                  */
13224                 tw32(offset, 0);
13225
13226                 val = tr32(offset);
13227
13228                 /* Test the read-only and read/write bits. */
13229                 if (((val & read_mask) != read_val) || (val & write_mask))
13230                         goto out;
13231
13232                 /* Write ones to all the bits defined by RdMask and WrMask, then
13233                  * make sure the read-only bits are not changed and the
13234                  * read/write bits are all ones.
13235                  */
13236                 tw32(offset, read_mask | write_mask);
13237
13238                 val = tr32(offset);
13239
13240                 /* Test the read-only bits. */
13241                 if ((val & read_mask) != read_val)
13242                         goto out;
13243
13244                 /* Test the read/write bits. */
13245                 if ((val & write_mask) != write_mask)
13246                         goto out;
13247
13248                 tw32(offset, save_val);
13249         }
13250
13251         return 0;
13252
13253 out:
13254         if (netif_msg_hw(tp))
13255                 netdev_err(tp->dev,
13256                            "Register test failed at offset %x\n", offset);
13257         tw32(offset, save_val);
13258         return -EIO;
13259 }
13260
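/* Walk a window of on-chip memory, writing each of three 32-bit test
 * patterns (all zeros, all ones, 0xaa55a55a) word by word through the
 * indirect memory accessors and reading them back.
 */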
13261 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13262 {
13263         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13264         int i;
13265         u32 j;
13266
13267         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13268                 for (j = 0; j < len; j += 4) {
13269                         u32 val;
13270
13271                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13272                         tg3_read_mem(tp, offset + j, &val);
13273                         if (val != test_pattern[i])
13274                                 return -EIO;
13275                 }
13276         }
13277         return 0;
13278 }
13279
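/* Memory self-test: pick the table of testable on-chip SRAM ranges for
 * this ASIC generation (each list is terminated by an offset of
 * 0xffffffff) and run tg3_do_mem_test() over every range.
 */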
13280 static int tg3_test_memory(struct tg3 *tp)
13281 {
13282         static struct mem_entry {
13283                 u32 offset;
13284                 u32 len;
13285         } mem_tbl_570x[] = {
13286                 { 0x00000000, 0x00b50},
13287                 { 0x00002000, 0x1c000},
13288                 { 0xffffffff, 0x00000}
13289         }, mem_tbl_5705[] = {
13290                 { 0x00000100, 0x0000c},
13291                 { 0x00000200, 0x00008},
13292                 { 0x00004000, 0x00800},
13293                 { 0x00006000, 0x01000},
13294                 { 0x00008000, 0x02000},
13295                 { 0x00010000, 0x0e000},
13296                 { 0xffffffff, 0x00000}
13297         }, mem_tbl_5755[] = {
13298                 { 0x00000200, 0x00008},
13299                 { 0x00004000, 0x00800},
13300                 { 0x00006000, 0x00800},
13301                 { 0x00008000, 0x02000},
13302                 { 0x00010000, 0x0c000},
13303                 { 0xffffffff, 0x00000}
13304         }, mem_tbl_5906[] = {
13305                 { 0x00000200, 0x00008},
13306                 { 0x00004000, 0x00400},
13307                 { 0x00006000, 0x00400},
13308                 { 0x00008000, 0x01000},
13309                 { 0x00010000, 0x01000},
13310                 { 0xffffffff, 0x00000}
13311         }, mem_tbl_5717[] = {
13312                 { 0x00000200, 0x00008},
13313                 { 0x00010000, 0x0a000},
13314                 { 0x00020000, 0x13c00},
13315                 { 0xffffffff, 0x00000}
13316         }, mem_tbl_57765[] = {
13317                 { 0x00000200, 0x00008},
13318                 { 0x00004000, 0x00800},
13319                 { 0x00006000, 0x09800},
13320                 { 0x00010000, 0x0a000},
13321                 { 0xffffffff, 0x00000}
13322         };
13323         struct mem_entry *mem_tbl;
13324         int err = 0;
13325         int i;
13326
13327         if (tg3_flag(tp, 5717_PLUS))
13328                 mem_tbl = mem_tbl_5717;
13329         else if (tg3_flag(tp, 57765_CLASS) ||
13330                  tg3_asic_rev(tp) == ASIC_REV_5762)
13331                 mem_tbl = mem_tbl_57765;
13332         else if (tg3_flag(tp, 5755_PLUS))
13333                 mem_tbl = mem_tbl_5755;
13334         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13335                 mem_tbl = mem_tbl_5906;
13336         else if (tg3_flag(tp, 5705_PLUS))
13337                 mem_tbl = mem_tbl_5705;
13338         else
13339                 mem_tbl = mem_tbl_570x;
13340
13341         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13342                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13343                 if (err)
13344                         break;
13345         }
13346
13347         return err;
13348 }
13349
13350 #define TG3_TSO_MSS             500
13351
13352 #define TG3_TSO_IP_HDR_LEN      20
13353 #define TG3_TSO_TCP_HDR_LEN     20
13354 #define TG3_TSO_TCP_OPT_LEN     12
13355
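/* Canned TSO test packet: a 2-byte 0x0800 (IPv4) ethertype, a 20-byte IPv4
 * header (10.0.0.1 -> 10.0.0.2, protocol TCP), and a 20-byte TCP header
 * followed by 12 bytes of options, matching the TG3_TSO_*_LEN constants
 * above.  The MAC addresses are filled in separately at transmit time.
 */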
13356 static const u8 tg3_tso_header[] = {
13357 0x08, 0x00,
13358 0x45, 0x00, 0x00, 0x00,
13359 0x00, 0x00, 0x40, 0x00,
13360 0x40, 0x06, 0x00, 0x00,
13361 0x0a, 0x00, 0x00, 0x01,
13362 0x0a, 0x00, 0x00, 0x02,
13363 0x0d, 0x00, 0xe0, 0x00,
13364 0x00, 0x00, 0x01, 0x00,
13365 0x00, 0x00, 0x02, 0x00,
13366 0x80, 0x10, 0x10, 0x00,
13367 0x14, 0x09, 0x00, 0x00,
13368 0x01, 0x01, 0x08, 0x0a,
13369 0x11, 0x11, 0x11, 0x11,
13370 0x11, 0x11, 0x11, 0x11,
13371 };
13372
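/* Core loopback engine: build one self-addressed frame (a multi-segment
 * TSO frame when tso_loopback is set) filled with an incrementing byte
 * pattern, transmit it, poll the coalescing engine until the tx consumer
 * and rx producer indices advance, and verify every returned packet's
 * ring, length, checksum and payload.  The MAC or PHY must already be in
 * loopback mode.
 */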
13373 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13374 {
13375         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13376         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13377         u32 budget;
13378         struct sk_buff *skb;
13379         u8 *tx_data, *rx_data;
13380         dma_addr_t map;
13381         int num_pkts, tx_len, rx_len, i, err;
13382         struct tg3_rx_buffer_desc *desc;
13383         struct tg3_napi *tnapi, *rnapi;
13384         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13385
13386         tnapi = &tp->napi[0];
13387         rnapi = &tp->napi[0];
13388         if (tp->irq_cnt > 1) {
13389                 if (tg3_flag(tp, ENABLE_RSS))
13390                         rnapi = &tp->napi[1];
13391                 if (tg3_flag(tp, ENABLE_TSS))
13392                         tnapi = &tp->napi[1];
13393         }
13394         coal_now = tnapi->coal_now | rnapi->coal_now;
13395
13396         err = -EIO;
13397
13398         tx_len = pktsz;
13399         skb = netdev_alloc_skb(tp->dev, tx_len);
13400         if (!skb)
13401                 return -ENOMEM;
13402
13403         tx_data = skb_put(skb, tx_len);
13404         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13405         memset(tx_data + ETH_ALEN, 0x0, 8);
13406
13407         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13408
13409         if (tso_loopback) {
13410                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13411
13412                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13413                               TG3_TSO_TCP_OPT_LEN;
13414
13415                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13416                        sizeof(tg3_tso_header));
13417                 mss = TG3_TSO_MSS;
13418
13419                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13420                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13421
13422                 /* Set the total length field in the IP header */
13423                 iph->tot_len = htons((u16)(mss + hdr_len));
13424
13425                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13426                               TXD_FLAG_CPU_POST_DMA);
13427
13428                 if (tg3_flag(tp, HW_TSO_1) ||
13429                     tg3_flag(tp, HW_TSO_2) ||
13430                     tg3_flag(tp, HW_TSO_3)) {
13431                         struct tcphdr *th;
13432                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13433                         th = (struct tcphdr *)&tx_data[val];
13434                         th->check = 0;
13435                 } else
13436                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13437
13438                 if (tg3_flag(tp, HW_TSO_3)) {
13439                         mss |= (hdr_len & 0xc) << 12;
13440                         if (hdr_len & 0x10)
13441                                 base_flags |= 0x00000010;
13442                         base_flags |= (hdr_len & 0x3e0) << 5;
13443                 } else if (tg3_flag(tp, HW_TSO_2))
13444                         mss |= hdr_len << 9;
13445                 else if (tg3_flag(tp, HW_TSO_1) ||
13446                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13447                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13448                 } else {
13449                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13450                 }
13451
13452                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13453         } else {
13454                 num_pkts = 1;
13455                 data_off = ETH_HLEN;
13456
13457                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13458                     tx_len > VLAN_ETH_FRAME_LEN)
13459                         base_flags |= TXD_FLAG_JMB_PKT;
13460         }
13461
13462         for (i = data_off; i < tx_len; i++)
13463                 tx_data[i] = (u8) (i & 0xff);
13464
13465         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13466         if (pci_dma_mapping_error(tp->pdev, map)) {
13467                 dev_kfree_skb(skb);
13468                 return -EIO;
13469         }
13470
13471         val = tnapi->tx_prod;
13472         tnapi->tx_buffers[val].skb = skb;
13473         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13474
13475         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13476                rnapi->coal_now);
13477
13478         udelay(10);
13479
13480         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13481
13482         budget = tg3_tx_avail(tnapi);
13483         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13484                             base_flags | TXD_FLAG_END, mss, 0)) {
13485                 tnapi->tx_buffers[val].skb = NULL;
13486                 dev_kfree_skb(skb);
13487                 return -EIO;
13488         }
13489
13490         tnapi->tx_prod++;
13491
13492         /* Sync BD data before updating mailbox */
13493         wmb();
13494
13495         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13496         tr32_mailbox(tnapi->prodmbox);
13497
13498         udelay(10);
13499
13500         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13501         for (i = 0; i < 35; i++) {
13502                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13503                        coal_now);
13504
13505                 udelay(10);
13506
13507                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13508                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13509                 if ((tx_idx == tnapi->tx_prod) &&
13510                     (rx_idx == (rx_start_idx + num_pkts)))
13511                         break;
13512         }
13513
13514         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13515         dev_kfree_skb(skb);
13516
13517         if (tx_idx != tnapi->tx_prod)
13518                 goto out;
13519
13520         if (rx_idx != rx_start_idx + num_pkts)
13521                 goto out;
13522
13523         val = data_off;
13524         while (rx_idx != rx_start_idx) {
13525                 desc = &rnapi->rx_rcb[rx_start_idx++];
13526                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13527                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13528
13529                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13530                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13531                         goto out;
13532
13533                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13534                          - ETH_FCS_LEN;
13535
13536                 if (!tso_loopback) {
13537                         if (rx_len != tx_len)
13538                                 goto out;
13539
13540                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13541                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13542                                         goto out;
13543                         } else {
13544                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13545                                         goto out;
13546                         }
13547                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13548                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13549                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13550                         goto out;
13551                 }
13552
13553                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13554                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13555                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13556                                              mapping);
13557                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13558                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13559                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13560                                              mapping);
13561                 } else
13562                         goto out;
13563
13564                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13565                                             PCI_DMA_FROMDEVICE);
13566
13567                 rx_data += TG3_RX_OFFSET(tp);
13568                 for (i = data_off; i < rx_len; i++, val++) {
13569                         if (*(rx_data + i) != (u8) (val & 0xff))
13570                                 goto out;
13571                 }
13572         }
13573
13574         err = 0;
13575
13576         /* tg3_free_rings will unmap and free the rx_data */
13577 out:
13578         return err;
13579 }
13580
13581 #define TG3_STD_LOOPBACK_FAILED         1
13582 #define TG3_JMB_LOOPBACK_FAILED         2
13583 #define TG3_TSO_LOOPBACK_FAILED         4
13584 #define TG3_LOOPBACK_FAILED \
13585         (TG3_STD_LOOPBACK_FAILED | \
13586          TG3_JMB_LOOPBACK_FAILED | \
13587          TG3_TSO_LOOPBACK_FAILED)
13588
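/* Loopback self-test driver: run standard and jumbo frames through MAC
 * loopback (skipped on 5780 and CPMU-equipped chips, see the errata note
 * below), then standard, TSO and jumbo frames through internal PHY
 * loopback and, when requested, an external loopback plug.  The PHY's EEE
 * capability flag is masked off for the duration and restored on exit.
 */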
13589 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13590 {
13591         int err = -EIO;
13592         u32 eee_cap;
13593         u32 jmb_pkt_sz = 9000;
13594
13595         if (tp->dma_limit)
13596                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13597
13598         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13599         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13600
13601         if (!netif_running(tp->dev)) {
13602                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13603                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13604                 if (do_extlpbk)
13605                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13606                 goto done;
13607         }
13608
13609         err = tg3_reset_hw(tp, true);
13610         if (err) {
13611                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13612                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13613                 if (do_extlpbk)
13614                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13615                 goto done;
13616         }
13617
13618         if (tg3_flag(tp, ENABLE_RSS)) {
13619                 int i;
13620
13621                 /* Reroute all rx packets to the 1st queue */
13622                 for (i = MAC_RSS_INDIR_TBL_0;
13623                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13624                         tw32(i, 0x0);
13625         }
13626
13627         /* HW errata - MAC loopback fails in some cases on 5780.
13628          * Normal traffic and PHY loopback are not affected by this
13629          * errata.  Also, the MAC loopback test is deprecated for
13630          * all newer ASIC revisions.
13631          */
13632         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13633             !tg3_flag(tp, CPMU_PRESENT)) {
13634                 tg3_mac_loopback(tp, true);
13635
13636                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13637                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13638
13639                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13640                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13641                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13642
13643                 tg3_mac_loopback(tp, false);
13644         }
13645
13646         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13647             !tg3_flag(tp, USE_PHYLIB)) {
13648                 int i;
13649
13650                 tg3_phy_lpbk_set(tp, 0, false);
13651
13652                 /* Wait for link */
13653                 for (i = 0; i < 100; i++) {
13654                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13655                                 break;
13656                         mdelay(1);
13657                 }
13658
13659                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13660                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13661                 if (tg3_flag(tp, TSO_CAPABLE) &&
13662                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13663                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13664                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13665                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13666                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13667
13668                 if (do_extlpbk) {
13669                         tg3_phy_lpbk_set(tp, 0, true);
13670
13671                         /* All link indications report up, but the hardware
13672                          * isn't really ready for about 20 msec.  Double it
13673                          * to be sure.
13674                          */
13675                         mdelay(40);
13676
13677                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13678                                 data[TG3_EXT_LOOPB_TEST] |=
13679                                                         TG3_STD_LOOPBACK_FAILED;
13680                         if (tg3_flag(tp, TSO_CAPABLE) &&
13681                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13682                                 data[TG3_EXT_LOOPB_TEST] |=
13683                                                         TG3_TSO_LOOPBACK_FAILED;
13684                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13685                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13686                                 data[TG3_EXT_LOOPB_TEST] |=
13687                                                         TG3_JMB_LOOPBACK_FAILED;
13688                 }
13689
13690                 /* Re-enable gphy autopowerdown. */
13691                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13692                         tg3_phy_toggle_apd(tp, true);
13693         }
13694
13695         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13696                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13697
13698 done:
13699         tp->phy_flags |= eee_cap;
13700
13701         return err;
13702 }
13703
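/* ethtool .self_test handler ("ethtool -t <dev> [offline|external_lb]").
 * The NVRAM and link tests always run; the register, memory, loopback and
 * interrupt tests only run with ETH_TEST_FL_OFFLINE set, since they halt
 * the chip and restart it afterwards.  Per-test results are written to
 * data[] at the TG3_*_TEST indices.
 */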
13704 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13705                           u64 *data)
13706 {
13707         struct tg3 *tp = netdev_priv(dev);
13708         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13709
13710         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13711                 if (tg3_power_up(tp)) {
13712                         etest->flags |= ETH_TEST_FL_FAILED;
13713                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13714                         return;
13715                 }
13716                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13717         }
13718
13719         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13720
13721         if (tg3_test_nvram(tp) != 0) {
13722                 etest->flags |= ETH_TEST_FL_FAILED;
13723                 data[TG3_NVRAM_TEST] = 1;
13724         }
13725         if (!doextlpbk && tg3_test_link(tp)) {
13726                 etest->flags |= ETH_TEST_FL_FAILED;
13727                 data[TG3_LINK_TEST] = 1;
13728         }
13729         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13730                 int err, err2 = 0, irq_sync = 0;
13731
13732                 if (netif_running(dev)) {
13733                         tg3_phy_stop(tp);
13734                         tg3_netif_stop(tp);
13735                         irq_sync = 1;
13736                 }
13737
13738                 tg3_full_lock(tp, irq_sync);
13739                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13740                 err = tg3_nvram_lock(tp);
13741                 tg3_halt_cpu(tp, RX_CPU_BASE);
13742                 if (!tg3_flag(tp, 5705_PLUS))
13743                         tg3_halt_cpu(tp, TX_CPU_BASE);
13744                 if (!err)
13745                         tg3_nvram_unlock(tp);
13746
13747                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13748                         tg3_phy_reset(tp);
13749
13750                 if (tg3_test_registers(tp) != 0) {
13751                         etest->flags |= ETH_TEST_FL_FAILED;
13752                         data[TG3_REGISTER_TEST] = 1;
13753                 }
13754
13755                 if (tg3_test_memory(tp) != 0) {
13756                         etest->flags |= ETH_TEST_FL_FAILED;
13757                         data[TG3_MEMORY_TEST] = 1;
13758                 }
13759
13760                 if (doextlpbk)
13761                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13762
13763                 if (tg3_test_loopback(tp, data, doextlpbk))
13764                         etest->flags |= ETH_TEST_FL_FAILED;
13765
13766                 tg3_full_unlock(tp);
13767
13768                 if (tg3_test_interrupt(tp) != 0) {
13769                         etest->flags |= ETH_TEST_FL_FAILED;
13770                         data[TG3_INTERRUPT_TEST] = 1;
13771                 }
13772
13773                 tg3_full_lock(tp, 0);
13774
13775                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13776                 if (netif_running(dev)) {
13777                         tg3_flag_set(tp, INIT_COMPLETE);
13778                         err2 = tg3_restart_hw(tp, true);
13779                         if (!err2)
13780                                 tg3_netif_start(tp);
13781                 }
13782
13783                 tg3_full_unlock(tp);
13784
13785                 if (irq_sync && !err2)
13786                         tg3_phy_start(tp);
13787         }
13788         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13789                 tg3_power_down_prepare(tp);
13790
13791 }
13792
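/* SIOCSHWTSTAMP handler: translate the requested hwtstamp_config into the
 * TG3_RX_PTP_CTL filter bits and the TX_TSTAMP_EN flag.  Only filters the
 * hardware implements exactly are accepted; anything else returns -ERANGE
 * instead of being widened to a broader filter.
 */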
13793 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13794 {
13795         struct tg3 *tp = netdev_priv(dev);
13796         struct hwtstamp_config stmpconf;
13797
13798         if (!tg3_flag(tp, PTP_CAPABLE))
13799                 return -EOPNOTSUPP;
13800
13801         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13802                 return -EFAULT;
13803
13804         if (stmpconf.flags)
13805                 return -EINVAL;
13806
13807         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13808             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13809                 return -ERANGE;
13810
13811         switch (stmpconf.rx_filter) {
13812         case HWTSTAMP_FILTER_NONE:
13813                 tp->rxptpctl = 0;
13814                 break;
13815         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13816                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13817                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13818                 break;
13819         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13820                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13821                                TG3_RX_PTP_CTL_SYNC_EVNT;
13822                 break;
13823         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13824                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13825                                TG3_RX_PTP_CTL_DELAY_REQ;
13826                 break;
13827         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13828                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13829                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13830                 break;
13831         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13832                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13833                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13834                 break;
13835         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13836                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13837                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13838                 break;
13839         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13840                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13841                                TG3_RX_PTP_CTL_SYNC_EVNT;
13842                 break;
13843         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13844                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13845                                TG3_RX_PTP_CTL_SYNC_EVNT;
13846                 break;
13847         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13848                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13849                                TG3_RX_PTP_CTL_SYNC_EVNT;
13850                 break;
13851         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13852                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13853                                TG3_RX_PTP_CTL_DELAY_REQ;
13854                 break;
13855         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13856                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13857                                TG3_RX_PTP_CTL_DELAY_REQ;
13858                 break;
13859         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13860                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13861                                TG3_RX_PTP_CTL_DELAY_REQ;
13862                 break;
13863         default:
13864                 return -ERANGE;
13865         }
13866
13867         if (netif_running(dev) && tp->rxptpctl)
13868                 tw32(TG3_RX_PTP_CTL,
13869                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13870
13871         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13872                 tg3_flag_set(tp, TX_TSTAMP_EN);
13873         else
13874                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13875
13876         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13877                 -EFAULT : 0;
13878 }
13879
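/* SIOCGHWTSTAMP handler: reverse-map tp->rxptpctl back onto the
 * hwtstamp_config filter that configured it and report the current
 * tx_type.
 */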
13880 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13881 {
13882         struct tg3 *tp = netdev_priv(dev);
13883         struct hwtstamp_config stmpconf;
13884
13885         if (!tg3_flag(tp, PTP_CAPABLE))
13886                 return -EOPNOTSUPP;
13887
13888         stmpconf.flags = 0;
13889         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13890                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13891
13892         switch (tp->rxptpctl) {
13893         case 0:
13894                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13895                 break;
13896         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13897                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13898                 break;
13899         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13900                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13901                 break;
13902         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13903                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13904                 break;
13905         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13906                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13907                 break;
13908         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13909                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13910                 break;
13911         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13912                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13913                 break;
13914         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13915                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13916                 break;
13917         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13918                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13919                 break;
13920         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13921                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13922                 break;
13923         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13924                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13925                 break;
13926         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13927                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13928                 break;
13929         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13930                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13931                 break;
13932         default:
13933                 WARN_ON_ONCE(1);
13934                 return -ERANGE;
13935         }
13936
13937         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13938                 -EFAULT : 0;
13939 }
13940
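/* Driver ioctl entry point (wired up as .ndo_do_ioctl).  With phylib
 * attached, MII ioctls are forwarded to the PHY device wholesale;
 * otherwise SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG are serviced against the
 * internal PHY under tp->lock, and the hardware timestamping ioctls are
 * handled by the helpers above.
 */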
13941 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13942 {
13943         struct mii_ioctl_data *data = if_mii(ifr);
13944         struct tg3 *tp = netdev_priv(dev);
13945         int err;
13946
13947         if (tg3_flag(tp, USE_PHYLIB)) {
13948                 struct phy_device *phydev;
13949                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13950                         return -EAGAIN;
13951                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13952                 return phy_mii_ioctl(phydev, ifr, cmd);
13953         }
13954
13955         switch (cmd) {
13956         case SIOCGMIIPHY:
13957                 data->phy_id = tp->phy_addr;
13958
13959                 /* fallthru */
13960         case SIOCGMIIREG: {
13961                 u32 mii_regval;
13962
13963                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13964                         break;                  /* We have no PHY */
13965
13966                 if (!netif_running(dev))
13967                         return -EAGAIN;
13968
13969                 spin_lock_bh(&tp->lock);
13970                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13971                                     data->reg_num & 0x1f, &mii_regval);
13972                 spin_unlock_bh(&tp->lock);
13973
13974                 data->val_out = mii_regval;
13975
13976                 return err;
13977         }
13978
13979         case SIOCSMIIREG:
13980                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13981                         break;                  /* We have no PHY */
13982
13983                 if (!netif_running(dev))
13984                         return -EAGAIN;
13985
13986                 spin_lock_bh(&tp->lock);
13987                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13988                                      data->reg_num & 0x1f, data->val_in);
13989                 spin_unlock_bh(&tp->lock);
13990
13991                 return err;
13992
13993         case SIOCSHWTSTAMP:
13994                 return tg3_hwtstamp_set(dev, ifr);
13995
13996         case SIOCGHWTSTAMP:
13997                 return tg3_hwtstamp_get(dev, ifr);
13998
13999         default:
14000                 /* do nothing */
14001                 break;
14002         }
14003         return -EOPNOTSUPP;
14004 }
14005
14006 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14007 {
14008         struct tg3 *tp = netdev_priv(dev);
14009
14010         memcpy(ec, &tp->coal, sizeof(*ec));
14011         return 0;
14012 }
14013
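/* ethtool .set_coalesce handler ("ethtool -C <dev> ...").  The IRQ and
 * stats-block limits stay zero on 5705-and-later parts, so the range
 * checks below reject any nonzero request for coalescing registers those
 * chips do not have.
 */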
14014 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14015 {
14016         struct tg3 *tp = netdev_priv(dev);
14017         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14018         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14019
14020         if (!tg3_flag(tp, 5705_PLUS)) {
14021                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14022                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14023                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14024                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14025         }
14026
14027         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14028             (!ec->rx_coalesce_usecs) ||
14029             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14030             (!ec->tx_coalesce_usecs) ||
14031             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14032             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14033             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14034             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14035             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14036             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14037             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14038             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14039                 return -EINVAL;
14040
14041         /* Only copy relevant parameters, ignore all others. */
14042         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14043         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14044         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14045         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14046         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14047         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14048         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14049         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14050         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14051
14052         if (netif_running(dev)) {
14053                 tg3_full_lock(tp, 0);
14054                 __tg3_set_coalesce(tp, &tp->coal);
14055                 tg3_full_unlock(tp);
14056         }
14057         return 0;
14058 }
14059
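/* ethtool .set_eee handler.  Changes to the advertised link modes must go
 * through the regular autoneg path, so any edata whose advertisement
 * differs from the current one is rejected; the Tx LPI timer is bounded
 * by what the CPMU link-idle debounce register can hold.
 */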
14060 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14061 {
14062         struct tg3 *tp = netdev_priv(dev);
14063
14064         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14065                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14066                 return -EOPNOTSUPP;
14067         }
14068
14069         if (edata->advertised != tp->eee.advertised) {
14070                 netdev_warn(tp->dev,
14071                             "Direct manipulation of EEE advertisement is not supported\n");
14072                 return -EINVAL;
14073         }
14074
14075         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14076                 netdev_warn(tp->dev,
14077                             "Maximum supported Tx LPI timer is %#x\n",
14078                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14079                 return -EINVAL;
14080         }
14081
14082         tp->eee = *edata;
14083
14084         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14085         tg3_warn_mgmt_link_flap(tp);
14086
14087         if (netif_running(tp->dev)) {
14088                 tg3_full_lock(tp, 0);
14089                 tg3_setup_eee(tp);
14090                 tg3_phy_reset(tp);
14091                 tg3_full_unlock(tp);
14092         }
14093
14094         return 0;
14095 }
14096
static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
                netdev_warn(tp->dev,
                            "Board does not support EEE!\n");
                return -EOPNOTSUPP;
        }

        *edata = tp->eee;
        return 0;
}

static const struct ethtool_ops tg3_ethtool_ops = {
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .set_phys_id            = tg3_set_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
        .get_rxnfc              = tg3_get_rxnfc,
        .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
        .get_rxfh               = tg3_get_rxfh,
        .set_rxfh               = tg3_set_rxfh,
        .get_channels           = tg3_get_channels,
        .set_channels           = tg3_set_channels,
        .get_ts_info            = tg3_get_ts_info,
        .get_eee                = tg3_get_eee,
        .set_eee                = tg3_set_eee,
        .get_link_ksettings     = tg3_get_link_ksettings,
        .set_link_ksettings     = tg3_set_link_ksettings,
};

static void tg3_get_stats64(struct net_device *dev,
                            struct rtnl_link_stats64 *stats)
{
        struct tg3 *tp = netdev_priv(dev);

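        /* If the hardware statistics block is unavailable (device not
         * fully up), report the previously saved snapshot instead.
         */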
        spin_lock_bh(&tp->lock);
        if (!tp->hw_stats) {
                *stats = tp->net_stats_prev;
                spin_unlock_bh(&tp->lock);
                return;
        }

        tg3_get_nstats(tp, stats);
        spin_unlock_bh(&tp->lock);
}

static void tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!netif_running(dev))
                return;

        tg3_full_lock(tp, 0);
        __tg3_set_rx_mode(dev);
        tg3_full_unlock(tp);
}

static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
                               int new_mtu)
{
        dev->mtu = new_mtu;

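        /* Jumbo MTUs: 5780-class chips trade TSO for jumbo support
         * (TSO_CAPABLE is toggled and the feature set refreshed), while
         * the other chips switch the jumbo RX ring on or off instead.
         */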
        if (new_mtu > ETH_DATA_LEN) {
                if (tg3_flag(tp, 5780_CLASS)) {
                        netdev_update_features(dev);
                        tg3_flag_clear(tp, TSO_CAPABLE);
                } else {
                        tg3_flag_set(tp, JUMBO_RING_ENABLE);
                }
        } else {
                if (tg3_flag(tp, 5780_CLASS)) {
                        tg3_flag_set(tp, TSO_CAPABLE);
                        netdev_update_features(dev);
                }
                tg3_flag_clear(tp, JUMBO_RING_ENABLE);
        }
}

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;
        bool reset_phy = false;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is brought up.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_set_mtu(dev, tp, new_mtu);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        /* Reset the PHY, otherwise the read DMA engine will be left in a
         * mode that breaks all requests to 256 bytes.
         */
        if (tg3_asic_rev(tp) == ASIC_REV_57766)
                reset_phy = true;

        err = tg3_restart_hw(tp, reset_phy);

        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

        return err;
}

static const struct net_device_ops tg3_netdev_ops = {
        .ndo_open               = tg3_open,
        .ndo_stop               = tg3_close,
        .ndo_start_xmit         = tg3_start_xmit,
        .ndo_get_stats64        = tg3_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = tg3_set_rx_mode,
        .ndo_set_mac_address    = tg3_set_mac_addr,
        .ndo_do_ioctl           = tg3_ioctl,
        .ndo_tx_timeout         = tg3_tx_timeout,
        .ndo_change_mtu         = tg3_change_mtu,
        .ndo_fix_features       = tg3_fix_features,
        .ndo_set_features       = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tg3_poll_controller,
#endif
};

static void tg3_get_eeprom_size(struct tg3 *tp)
{
        u32 cursize, val, magic;

        tp->nvram_size = EEPROM_CHIP_SIZE;

        if (tg3_nvram_read(tp, 0, &magic) != 0)
                return;

        if ((magic != TG3_EEPROM_MAGIC) &&
            ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
            ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
                return;

        /*
         * Size the chip by reading offsets at increasing powers of two.
         * When we encounter our validation signature, we know the addressing
         * has wrapped around, and thus have our chip size.
         */
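        /* Illustrative walk-through (sizes assumed for the example): on a
         * 128 KB part, reads at 0x10, 0x20, ..., 0x10000 return ordinary
         * data; the read at 0x20000 wraps to offset 0 and returns the
         * magic signature, so cursize (0x20000 = 128 KB) is the size.
         */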
        cursize = 0x10;

        while (cursize < tp->nvram_size) {
                if (tg3_nvram_read(tp, cursize, &val) != 0)
                        return;

                if (val == magic)
                        break;

                cursize <<= 1;
        }

        tp->nvram_size = cursize;
}

static void tg3_get_nvram_size(struct tg3 *tp)
{
        u32 val;

        if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
                return;

        /* Selfboot format */
        if (val != TG3_EEPROM_MAGIC) {
                tg3_get_eeprom_size(tp);
                return;
        }

        if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
                if (val != 0) {
                        /* This is confusing.  We want to operate on the
                         * 16-bit value at offset 0xf2.  The tg3_nvram_read()
                         * call will read from NVRAM and byteswap the data
                         * according to the byteswapping settings for all
                         * other register accesses.  This ensures the data we
                         * want will always reside in the lower 16-bits.
                         * However, the data in NVRAM is in LE format, which
                         * means the data from the NVRAM read will always be
                         * opposite the endianness of the CPU.  The 16-bit
                         * byteswap then brings the data to CPU endianness.
                         */
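                        /* Example (value assumed for illustration): if
                         * the 16-bit field holds 512, swab16() recovers
                         * 512 in CPU byte order on either endianness,
                         * yielding a 512 KB NVRAM below.
                         */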
                        tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
                        return;
                }
        }
        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}

static void tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tg3_flag_set(tp, FLASH);
        } else {
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
            tg3_flag(tp, 5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_ATMEL_EEPROM:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ST:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_SAIFUN:
                        tp->nvram_jedecnum = JEDEC_SAIFUN;
                        tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_SST_SMALL:
                case FLASH_VENDOR_SST_LARGE:
                        tp->nvram_jedecnum = JEDEC_SST;
                        tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                        break;
                }
        } else {
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tg3_flag_set(tp, NVRAM_BUFFERED);
        }
}

static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
        switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
        case FLASH_5752PAGE_SIZE_256:
                tp->nvram_pagesize = 256;
                break;
        case FLASH_5752PAGE_SIZE_512:
                tp->nvram_pagesize = 512;
                break;
        case FLASH_5752PAGE_SIZE_1K:
                tp->nvram_pagesize = 1024;
                break;
        case FLASH_5752PAGE_SIZE_2K:
                tp->nvram_pagesize = 2048;
                break;
        case FLASH_5752PAGE_SIZE_4K:
                tp->nvram_pagesize = 4096;
                break;
        case FLASH_5752PAGE_SIZE_264:
                tp->nvram_pagesize = 264;
                break;
        case FLASH_5752PAGE_SIZE_528:
                tp->nvram_pagesize = 528;
                break;
        }
}

static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tg3_flag_set(tp, PROTECTED_NVRAM);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        }

        if (tg3_flag(tp, FLASH)) {
                tg3_nvram_get_pagesize(tp, nvcfg1);
        } else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}

static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
        case FLASH_5755VENDOR_ATMEL_FLASH_5:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
                    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
                        tp->nvram_size = (protect ? 0x3e200 :
                                          TG3_NVRAM_SIZE_512KB);
                else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_128KB);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_128KB);
                else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_128KB :
                                          TG3_NVRAM_SIZE_512KB);
                break;
        }
}

static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }
}

static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5761VENDOR_ATMEL_ADB021D:
        case FLASH_5761VENDOR_ATMEL_ADB041D:
        case FLASH_5761VENDOR_ATMEL_ADB081D:
        case FLASH_5761VENDOR_ATMEL_ADB161D:
        case FLASH_5761VENDOR_ATMEL_MDB021D:
        case FLASH_5761VENDOR_ATMEL_MDB041D:
        case FLASH_5761VENDOR_ATMEL_MDB081D:
        case FLASH_5761VENDOR_ATMEL_MDB161D:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
                tp->nvram_pagesize = 256;
                break;
        case FLASH_5761VENDOR_ST_A_M45PE20:
        case FLASH_5761VENDOR_ST_A_M45PE40:
        case FLASH_5761VENDOR_ST_A_M45PE80:
        case FLASH_5761VENDOR_ST_A_M45PE16:
        case FLASH_5761VENDOR_ST_M_M45PE20:
        case FLASH_5761VENDOR_ST_M_M45PE40:
        case FLASH_5761VENDOR_ST_M_M45PE80:
        case FLASH_5761VENDOR_ST_M_M45PE16:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }

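        /* With TPM protection active, the accessible size is taken from
         * the address-lockout register rather than from the nominal
         * capacity of the part.
         */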
        if (protect) {
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_size = TG3_NVRAM_SIZE_2MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                }
        }
}

static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_57780VENDOR_ATMEL_AT45DB011D:
        case FLASH_57780VENDOR_ATMEL_AT45DB011B:
        case FLASH_57780VENDOR_ATMEL_AT45DB021D:
        case FLASH_57780VENDOR_ATMEL_AT45DB021B:
        case FLASH_57780VENDOR_ATMEL_AT45DB041D:
        case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_57780VENDOR_ATMEL_AT45DB011D:
                case FLASH_57780VENDOR_ATMEL_AT45DB011B:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB021D:
                case FLASH_57780VENDOR_ATMEL_AT45DB021B:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB041D:
                case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ST_M45PE10:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5717VENDOR_ATMEL_EEPROM:
        case FLASH_5717VENDOR_MICRO_EEPROM:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5717VENDOR_ATMEL_MDB011D:
        case FLASH_5717VENDOR_ATMEL_ADB011B:
        case FLASH_5717VENDOR_ATMEL_ADB011D:
        case FLASH_5717VENDOR_ATMEL_MDB021D:
        case FLASH_5717VENDOR_ATMEL_ADB021B:
        case FLASH_5717VENDOR_ATMEL_ADB021D:
        case FLASH_5717VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ATMEL_MDB021D:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ATMEL_ADB021B:
                case FLASH_5717VENDOR_ATMEL_ADB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5717VENDOR_ST_M_M25PE10:
        case FLASH_5717VENDOR_ST_A_M25PE10:
        case FLASH_5717VENDOR_ST_M_M45PE10:
        case FLASH_5717VENDOR_ST_A_M45PE10:
        case FLASH_5717VENDOR_ST_M_M25PE20:
        case FLASH_5717VENDOR_ST_A_M25PE20:
        case FLASH_5717VENDOR_ST_M_M45PE20:
        case FLASH_5717VENDOR_ST_A_M45PE20:
        case FLASH_5717VENDOR_ST_25USPT:
        case FLASH_5717VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ST_M_M25PE20:
                case FLASH_5717VENDOR_ST_M_M45PE20:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ST_A_M25PE20:
                case FLASH_5717VENDOR_ST_A_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, nvmpinstrp;

        nvcfg1 = tr32(NVRAM_CFG1);
        nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

        if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
                        tg3_flag_set(tp, NO_NVRAM);
                        return;
                }

                switch (nvmpinstrp) {
                case FLASH_5762_EEPROM_HD:
                        nvmpinstrp = FLASH_5720_EEPROM_HD;
                        break;
                case FLASH_5762_EEPROM_LD:
                        nvmpinstrp = FLASH_5720_EEPROM_LD;
                        break;
                case FLASH_5720VENDOR_M_ST_M45PE20:
                        /* This pinstrap supports multiple sizes, so force it
                         * to read the actual size from location 0xf0.
                         */
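                        /* (The size word at 0xf0 is the one parsed by
                         * tg3_get_nvram_size() above.)
                         */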
                        nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
                        break;
                }
        }

        switch (nvmpinstrp) {
        case FLASH_5720_EEPROM_HD:
        case FLASH_5720_EEPROM_LD:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                if (nvmpinstrp == FLASH_5720_EEPROM_HD)
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                else
                        tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
                return;
        case FLASH_5720VENDOR_M_ATMEL_DB011D:
        case FLASH_5720VENDOR_A_ATMEL_DB011B:
        case FLASH_5720VENDOR_A_ATMEL_DB011D:
        case FLASH_5720VENDOR_M_ATMEL_DB021D:
        case FLASH_5720VENDOR_A_ATMEL_DB021B:
        case FLASH_5720VENDOR_A_ATMEL_DB021D:
        case FLASH_5720VENDOR_M_ATMEL_DB041D:
        case FLASH_5720VENDOR_A_ATMEL_DB041B:
        case FLASH_5720VENDOR_A_ATMEL_DB041D:
        case FLASH_5720VENDOR_M_ATMEL_DB081D:
        case FLASH_5720VENDOR_A_ATMEL_DB081D:
        case FLASH_5720VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ATMEL_DB021D:
                case FLASH_5720VENDOR_A_ATMEL_DB021B:
                case FLASH_5720VENDOR_A_ATMEL_DB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB041D:
                case FLASH_5720VENDOR_A_ATMEL_DB041B:
                case FLASH_5720VENDOR_A_ATMEL_DB041D:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB081D:
                case FLASH_5720VENDOR_A_ATMEL_DB081D:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                                tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5720VENDOR_M_ST_M25PE10:
        case FLASH_5720VENDOR_M_ST_M45PE10:
        case FLASH_5720VENDOR_A_ST_M25PE10:
        case FLASH_5720VENDOR_A_ST_M45PE10:
        case FLASH_5720VENDOR_M_ST_M25PE20:
        case FLASH_5720VENDOR_M_ST_M45PE20:
        case FLASH_5720VENDOR_A_ST_M25PE20:
        case FLASH_5720VENDOR_A_ST_M45PE20:
        case FLASH_5720VENDOR_M_ST_M25PE40:
        case FLASH_5720VENDOR_M_ST_M45PE40:
        case FLASH_5720VENDOR_A_ST_M25PE40:
        case FLASH_5720VENDOR_A_ST_M45PE40:
        case FLASH_5720VENDOR_M_ST_M25PE80:
        case FLASH_5720VENDOR_M_ST_M45PE80:
        case FLASH_5720VENDOR_A_ST_M25PE80:
        case FLASH_5720VENDOR_A_ST_M45PE80:
        case FLASH_5720VENDOR_ST_25USPT:
        case FLASH_5720VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ST_M25PE20:
                case FLASH_5720VENDOR_M_ST_M45PE20:
                case FLASH_5720VENDOR_A_ST_M25PE20:
                case FLASH_5720VENDOR_A_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE40:
                case FLASH_5720VENDOR_M_ST_M45PE40:
                case FLASH_5720VENDOR_A_ST_M25PE40:
                case FLASH_5720VENDOR_A_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE80:
                case FLASH_5720VENDOR_M_ST_M45PE80:
                case FLASH_5720VENDOR_A_ST_M25PE80:
                case FLASH_5720VENDOR_A_ST_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                                tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

        if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                u32 val;

                if (tg3_nvram_read(tp, 0, &val))
                        return;

                if (val != TG3_EEPROM_MAGIC &&
                    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
                        tg3_flag_set(tp, NO_NVRAM);
        }
}

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
        if (tg3_flag(tp, IS_SSB_CORE)) {
                /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
                tg3_flag_clear(tp, NVRAM);
                tg3_flag_clear(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tw32_f(GRC_EEPROM_ADDR,
             (EEPROM_ADDR_FSM_RESET |
              (EEPROM_DEFAULT_CLOCK_PERIOD <<
               EEPROM_ADDR_CLKPERD_SHIFT)));

        msleep(1);

        /* Enable seeprom accesses. */
        tw32_f(GRC_LOCAL_CTRL,
             tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
        udelay(100);

        if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
            tg3_asic_rev(tp) != ASIC_REV_5701) {
                tg3_flag_set(tp, NVRAM);

                if (tg3_nvram_lock(tp)) {
                        netdev_warn(tp->dev,
                                    "Cannot get nvram lock, %s failed\n",
                                    __func__);
                        return;
                }
                tg3_enable_nvram_access(tp);

                tp->nvram_size = 0;

                if (tg3_asic_rev(tp) == ASIC_REV_5752)
                        tg3_get_5752_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5755)
                        tg3_get_5755_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
                         tg3_asic_rev(tp) == ASIC_REV_5784 ||
                         tg3_asic_rev(tp) == ASIC_REV_5785)
                        tg3_get_5787_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        tg3_get_5761_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                        tg3_get_5906_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
                         tg3_flag(tp, 57765_CLASS))
                        tg3_get_57780_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
                         tg3_asic_rev(tp) == ASIC_REV_5719)
                        tg3_get_5717_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
                         tg3_asic_rev(tp) == ASIC_REV_5762)
                        tg3_get_5720_nvram_info(tp);
                else
                        tg3_get_nvram_info(tp);

                if (tp->nvram_size == 0)
                        tg3_get_nvram_size(tp);

                tg3_disable_nvram_access(tp);
                tg3_nvram_unlock(tp);

        } else {
                tg3_flag_clear(tp, NVRAM);
                tg3_flag_clear(tp, NVRAM_BUFFERED);

                tg3_get_eeprom_size(tp);
        }
}

struct subsys_tbl_ent {
        u16 subsys_vendor, subsys_devid;
        u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        /* Broadcom boards. */
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

        /* 3com boards. */
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

        /* DELL boards. */
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

        /* Compaq boards. */
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

        /* IBM boards. */
        { TG3PCI_SUBVENDOR_ID_IBM,
          TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
                if ((subsys_id_to_phy_id[i].subsys_vendor ==
                     tp->pdev->subsystem_vendor) &&
                    (subsys_id_to_phy_id[i].subsys_devid ==
                     tp->pdev->subsystem_device))
                        return &subsys_id_to_phy_id[i];
        }
        return NULL;
}

static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
        u32 val;

        tp->phy_id = TG3_PHY_ID_INVALID;
        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

        /* Assume an onboard device and WOL capable by default.  */
        tg3_flag_set(tp, EEPROM_WRITE_PROT);
        tg3_flag_set(tp, WOL_CAP);

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
                }
                val = tr32(VCPU_CFGSHDW);
                if (val & VCPU_CFGSHDW_ASPM_DBNC)
                        tg3_flag_set(tp, ASPM_WORKAROUND);
                if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
                    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }
                goto done;
        }

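        /* The remaining configuration lives in bootcode-initialized
         * shared SRAM; a valid signature word gates use of the whole
         * block.
         */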
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg, led_cfg;
                u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
                u32 nic_phy_id, ver, eeprom_phy_id;
                int eeprom_phy_serdes = 0;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                tp->nic_sram_data_cfg = nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
                ver >>= NIC_SRAM_DATA_VER_SHIFT;
                if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
                    tg3_asic_rev(tp) != ASIC_REV_5703 &&
                    (ver > 0) && (ver < 0x100))
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

                if (tg3_asic_rev(tp) == ASIC_REV_5785)
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

                if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
                    tg3_asic_rev(tp) == ASIC_REV_5720)
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

                if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
                    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
                        eeprom_phy_serdes = 1;

                tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
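                /* The two SRAM halves are merged with the same bit
                 * packing that tg3_phy_probe() applies to the
                 * MII_PHYSID1/MII_PHYSID2 registers.
                 */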
                if (nic_phy_id != 0) {
                        u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
                        u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

                        eeprom_phy_id  = (id1 >> 16) << 10;
                        eeprom_phy_id |= (id2 & 0xfc00) << 16;
                        eeprom_phy_id |= (id2 & 0x03ff) <<  0;
                } else
                        eeprom_phy_id = 0;

                tp->phy_id = eeprom_phy_id;
                if (eeprom_phy_serdes) {
                        if (!tg3_flag(tp, 5705_PLUS))
                                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                        else
                                tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
                }

                if (tg3_flag(tp, 5750_PLUS))
                        led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
                                    SHASTA_EXT_LED_MODE_MASK);
                else
                        led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

                switch (led_cfg) {
                default:
                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_MAC;

                        /* Default to PHY_1_MODE if 0 (MAC_MODE) is
                         * read on some older 5700/5701 bootcode.
                         */
                        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
                            tg3_asic_rev(tp) == ASIC_REV_5701)
                                tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                        break;

                case SHASTA_EXT_LED_SHARED:
                        tp->led_ctrl = LED_CTRL_MODE_SHARED;
                        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
                            tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);

                        if (tg3_flag(tp, 5717_PLUS) ||
                            tg3_asic_rev(tp) == ASIC_REV_5762)
                                tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
                                                LED_CTRL_BLINK_RATE_MASK;

                        break;

                case SHASTA_EXT_LED_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
                        break;

                case SHASTA_EXT_LED_COMBO:
                        tp->led_ctrl = LED_CTRL_MODE_COMBO;
                        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                }

                if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
                     tg3_asic_rev(tp) == ASIC_REV_5701) &&
                    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;

                if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
                        tg3_flag_set(tp, EEPROM_WRITE_PROT);
                        if ((tp->pdev->subsystem_vendor ==
                             PCI_VENDOR_ID_ARIMA) &&
                            (tp->pdev->subsystem_device == 0x205a ||
                             tp->pdev->subsystem_device == 0x2063))
                                tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                } else {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
                }

                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
                }

                if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
                    tg3_flag(tp, 5750_PLUS))
                        tg3_flag_set(tp, ENABLE_APE);

                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
                    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
                        tg3_flag_clear(tp, WOL_CAP);

                if (tg3_flag(tp, WOL_CAP) &&
                    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }

                if (cfg2 & (1 << 17))
                        tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

                /* SerDes signal pre-emphasis in register 0x590 is set by
                 * the bootcode if bit 18 is set.
                 */
                if (cfg2 & (1 << 18))
                        tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

                if ((tg3_flag(tp, 57765_PLUS) ||
                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
                      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
                    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
                        tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

                if (tg3_flag(tp, PCI_EXPRESS)) {
                        u32 cfg3;

                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
                        if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
                            !tg3_flag(tp, 57765_PLUS) &&
                            (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
                                tg3_flag_set(tp, ASPM_WORKAROUND);
                        if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
                                tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
                        if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
                                tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
                }

                if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
                        tg3_flag_set(tp, RGMII_INBAND_DISABLE);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
                        tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
                        tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

                if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
                        tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
        }
done:
        if (tg3_flag(tp, WOL_CAP))
                device_set_wakeup_enable(&tp->pdev->dev,
                                         tg3_flag(tp, WOL_ENABLE));
        else
                device_set_wakeup_capable(&tp->pdev->dev, false);
}

static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int i, err;
        u32 val2, off = offset * 8;

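        /* OTP words sit at 8-byte strides in APE address space (hence
         * offset * 8); completion is polled for up to 100 * 10 us = 1 ms.
         */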
        err = tg3_nvram_lock(tp);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
        tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
                        APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
        tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
        udelay(10);

        for (i = 0; i < 100; i++) {
                val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
                if (val2 & APE_OTP_STATUS_CMD_DONE) {
                        *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
                        break;
                }
                udelay(10);
        }

        tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

        tg3_nvram_unlock(tp);
        if (val2 & APE_OTP_STATUS_CMD_DONE)
                return 0;

        return -EBUSY;
}

static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
        int i;
        u32 val;

        tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
        tw32(OTP_CTRL, cmd);

        /* Wait for up to 1 ms for command to execute. */
        for (i = 0; i < 100; i++) {
                val = tr32(OTP_STATUS);
                if (val & OTP_STATUS_CMD_DONE)
                        break;
                udelay(10);
        }

        return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
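/* In the merge below, the low half of the word read at OTP_ADDRESS_MAGIC1
 * supplies the upper 16 bits of the result and the high half of the word
 * read at OTP_ADDRESS_MAGIC2 supplies the lower 16 bits.
 */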
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
        u32 bhalf_otp, thalf_otp;

        tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
                return 0;

        tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
                return 0;

        thalf_otp = tr32(OTP_READ_DATA);

        tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
                return 0;

        bhalf_otp = tr32(OTP_READ_DATA);

        return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

static void tg3_phy_init_link_config(struct tg3 *tp)
{
        u32 adv = ADVERTISED_Autoneg;

        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
                        adv |= ADVERTISED_1000baseT_Half;
                adv |= ADVERTISED_1000baseT_Full;
        }

        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                adv |= ADVERTISED_100baseT_Half |
                       ADVERTISED_100baseT_Full |
                       ADVERTISED_10baseT_Half |
                       ADVERTISED_10baseT_Full |
                       ADVERTISED_TP;
        else
                adv |= ADVERTISED_FIBRE;

        tp->link_config.advertising = adv;
        tp->link_config.speed = SPEED_UNKNOWN;
        tp->link_config.duplex = DUPLEX_UNKNOWN;
        tp->link_config.autoneg = AUTONEG_ENABLE;
        tp->link_config.active_speed = SPEED_UNKNOWN;
        tp->link_config.active_duplex = DUPLEX_UNKNOWN;

        tp->old_link = -1;
}

15404 static int tg3_phy_probe(struct tg3 *tp)
15405 {
15406         u32 hw_phy_id_1, hw_phy_id_2;
15407         u32 hw_phy_id, hw_phy_id_masked;
15408         int err;
15409
        /* Flow control autonegotiation is the default behavior. */
15411         tg3_flag_set(tp, PAUSE_AUTONEG);
15412         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15413
15414         if (tg3_flag(tp, ENABLE_APE)) {
15415                 switch (tp->pci_fn) {
15416                 case 0:
15417                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15418                         break;
15419                 case 1:
15420                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15421                         break;
15422                 case 2:
15423                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15424                         break;
15425                 case 3:
15426                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15427                         break;
15428                 }
15429         }
15430
15431         if (!tg3_flag(tp, ENABLE_ASF) &&
15432             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15433             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15434                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15435                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15436
15437         if (tg3_flag(tp, USE_PHYLIB))
15438                 return tg3_phy_init(tp);
15439
15440         /* Reading the PHY ID register can conflict with ASF
15441          * firmware access to the PHY hardware.
15442          */
15443         err = 0;
15444         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15445                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15446         } else {
                /* Now read the physical PHY_ID from the chip and verify
                 * that it is sane.  If it doesn't look good, we fall back
                 * to the hard-coded, table-based PHY_ID and, failing that,
                 * to the value found in the eeprom area.
                 */
15452                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15453                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15454
15455                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15456                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15457                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15458
15459                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15460         }
15461
15462         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15463                 tp->phy_id = hw_phy_id;
15464                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15465                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15466                 else
15467                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15468         } else {
15469                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15470                         /* Do nothing, phy ID already set up in
15471                          * tg3_get_eeprom_hw_cfg().
15472                          */
15473                 } else {
15474                         struct subsys_tbl_ent *p;
15475
15476                         /* No eeprom signature?  Try the hardcoded
15477                          * subsys device table.
15478                          */
15479                         p = tg3_lookup_by_subsys(tp);
15480                         if (p) {
15481                                 tp->phy_id = p->phy_id;
15482                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
                                /* So far we have seen the IDs 0xbc050cd0,
                                 * 0xbc050f80 and 0xbc050c30 on devices
                                 * connected to a BCM4785, and there are
                                 * probably more.  For now, just assume
                                 * that the phy is supported when it is
                                 * connected to an SSB core.
                                 */
15490                                 return -ENODEV;
15491                         }
15492
15493                         if (!tp->phy_id ||
15494                             tp->phy_id == TG3_PHY_ID_BCM8002)
15495                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15496                 }
15497         }
15498
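        /* Copper PHYs on these chips support IEEE 802.3az Energy
         * Efficient Ethernet; advertise it by default.
         */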
15499         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15500             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15501              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15502              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15503              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15504              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15505               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15506              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15507               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15508                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15509
15510                 tp->eee.supported = SUPPORTED_100baseT_Full |
15511                                     SUPPORTED_1000baseT_Full;
15512                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15513                                      ADVERTISED_1000baseT_Full;
15514                 tp->eee.eee_enabled = 1;
15515                 tp->eee.tx_lpi_enabled = 1;
15516                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15517         }
15518
15519         tg3_phy_init_link_config(tp);
15520
15521         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15522             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15523             !tg3_flag(tp, ENABLE_APE) &&
15524             !tg3_flag(tp, ENABLE_ASF)) {
15525                 u32 bmsr, dummy;
15526
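                /* BMSR latches link-down events; read it twice so that
                 * the second value reflects the current link state.
                 */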
15527                 tg3_readphy(tp, MII_BMSR, &bmsr);
15528                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15529                     (bmsr & BMSR_LSTATUS))
15530                         goto skip_phy_reset;
15531
15532                 err = tg3_phy_reset(tp);
15533                 if (err)
15534                         return err;
15535
15536                 tg3_phy_set_wirespeed(tp);
15537
15538                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15539                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15540                                             tp->link_config.flowctrl);
15541
15542                         tg3_writephy(tp, MII_BMCR,
15543                                      BMCR_ANENABLE | BMCR_ANRESTART);
15544                 }
15545         }
15546
15547 skip_phy_reset:
15548         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15549                 err = tg3_init_5401phy_dsp(tp);
15550                 if (err)
15551                         return err;
15552
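                /* Note: the DSP init is intentionally issued a second
                 * time after a successful first pass; a single pass
                 * apparently does not always take on the 5401.
                 */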
15553                 err = tg3_init_5401phy_dsp(tp);
15554         }
15555
15556         return err;
15557 }
15558
15559 static void tg3_read_vpd(struct tg3 *tp)
15560 {
15561         u8 *vpd_data;
15562         unsigned int block_end, rosize, len;
15563         u32 vpdlen;
15564         int j, i = 0;
15565
15566         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15567         if (!vpd_data)
15568                 goto out_no_vpd;
15569
15570         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15571         if (i < 0)
15572                 goto out_not_found;
15573
15574         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15575         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15576         i += PCI_VPD_LRDT_TAG_SIZE;
15577
15578         if (block_end > vpdlen)
15579                 goto out_not_found;
15580
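        /* "1028" is Dell's PCI vendor ID in ASCII; on Dell boards the
         * firmware version is kept under the V0 vendor-specific keyword.
         */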
15581         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15582                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15583         if (j > 0) {
15584                 len = pci_vpd_info_field_size(&vpd_data[j]);
15585
15586                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15587                 if (j + len > block_end || len != 4 ||
15588                     memcmp(&vpd_data[j], "1028", 4))
15589                         goto partno;
15590
15591                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15592                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15593                 if (j < 0)
15594                         goto partno;
15595
15596                 len = pci_vpd_info_field_size(&vpd_data[j]);
15597
15598                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15599                 if (j + len > block_end)
15600                         goto partno;
15601
15602                 if (len >= sizeof(tp->fw_ver))
15603                         len = sizeof(tp->fw_ver) - 1;
15604                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15605                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15606                          &vpd_data[j]);
15607         }
15608
15609 partno:
15610         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15611                                       PCI_VPD_RO_KEYWORD_PARTNO);
15612         if (i < 0)
15613                 goto out_not_found;
15614
15615         len = pci_vpd_info_field_size(&vpd_data[i]);
15616
15617         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15618         if (len > TG3_BPN_SIZE ||
15619             (len + i) > vpdlen)
15620                 goto out_not_found;
15621
15622         memcpy(tp->board_part_number, &vpd_data[i], len);
15623
15624 out_not_found:
15625         kfree(vpd_data);
15626         if (tp->board_part_number[0])
15627                 return;
15628
15629 out_no_vpd:
15630         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15631                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15632                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15633                         strcpy(tp->board_part_number, "BCM5717");
15634                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15635                         strcpy(tp->board_part_number, "BCM5718");
15636                 else
15637                         goto nomatch;
15638         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15639                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15640                         strcpy(tp->board_part_number, "BCM57780");
15641                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15642                         strcpy(tp->board_part_number, "BCM57760");
15643                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15644                         strcpy(tp->board_part_number, "BCM57790");
15645                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15646                         strcpy(tp->board_part_number, "BCM57788");
15647                 else
15648                         goto nomatch;
15649         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15650                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15651                         strcpy(tp->board_part_number, "BCM57761");
15652                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15653                         strcpy(tp->board_part_number, "BCM57765");
15654                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15655                         strcpy(tp->board_part_number, "BCM57781");
15656                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15657                         strcpy(tp->board_part_number, "BCM57785");
15658                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15659                         strcpy(tp->board_part_number, "BCM57791");
15660                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15661                         strcpy(tp->board_part_number, "BCM57795");
15662                 else
15663                         goto nomatch;
15664         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15665                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15666                         strcpy(tp->board_part_number, "BCM57762");
15667                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15668                         strcpy(tp->board_part_number, "BCM57766");
15669                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15670                         strcpy(tp->board_part_number, "BCM57782");
15671                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15672                         strcpy(tp->board_part_number, "BCM57786");
15673                 else
15674                         goto nomatch;
15675         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15676                 strcpy(tp->board_part_number, "BCM95906");
15677         } else {
15678 nomatch:
15679                 strcpy(tp->board_part_number, "none");
15680         }
15681 }
15682
15683 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15684 {
15685         u32 val;
15686
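        /* A valid firmware image starts with a header word of the form
         * 0x0cxxxxxx followed by a zero word.
         */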
15687         if (tg3_nvram_read(tp, offset, &val) ||
15688             (val & 0xfc000000) != 0x0c000000 ||
15689             tg3_nvram_read(tp, offset + 4, &val) ||
15690             val != 0)
15691                 return 0;
15692
15693         return 1;
15694 }
15695
15696 static void tg3_read_bc_ver(struct tg3 *tp)
15697 {
15698         u32 val, offset, start, ver_offset;
15699         int i, dst_off;
15700         bool newver = false;
15701
15702         if (tg3_nvram_read(tp, 0xc, &offset) ||
15703             tg3_nvram_read(tp, 0x4, &start))
15704                 return;
15705
15706         offset = tg3_nvram_logical_addr(tp, offset);
15707
15708         if (tg3_nvram_read(tp, offset, &val))
15709                 return;
15710
15711         if ((val & 0xfc000000) == 0x0c000000) {
15712                 if (tg3_nvram_read(tp, offset + 4, &val))
15713                         return;
15714
15715                 if (val == 0)
15716                         newver = true;
15717         }
15718
15719         dst_off = strlen(tp->fw_ver);
15720
15721         if (newver) {
15722                 if (TG3_VER_SIZE - dst_off < 16 ||
15723                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15724                         return;
15725
15726                 offset = offset + ver_offset - start;
15727                 for (i = 0; i < 16; i += 4) {
15728                         __be32 v;
15729                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15730                                 return;
15731
15732                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15733                 }
15734         } else {
15735                 u32 major, minor;
15736
15737                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15738                         return;
15739
15740                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15741                         TG3_NVM_BCVER_MAJSFT;
15742                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15743                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15744                          "v%d.%02d", major, minor);
15745         }
15746 }
15747
15748 static void tg3_read_hwsb_ver(struct tg3 *tp)
15749 {
15750         u32 val, major, minor;
15751
15752         /* Use native endian representation */
15753         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15754                 return;
15755
15756         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15757                 TG3_NVM_HWSB_CFG1_MAJSFT;
15758         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15759                 TG3_NVM_HWSB_CFG1_MINSFT;
15760
        snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15762 }
15763
15764 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15765 {
15766         u32 offset, major, minor, build;
15767
15768         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15769
15770         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15771                 return;
15772
15773         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15774         case TG3_EEPROM_SB_REVISION_0:
15775                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15776                 break;
15777         case TG3_EEPROM_SB_REVISION_2:
15778                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15779                 break;
15780         case TG3_EEPROM_SB_REVISION_3:
15781                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15782                 break;
15783         case TG3_EEPROM_SB_REVISION_4:
15784                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15785                 break;
15786         case TG3_EEPROM_SB_REVISION_5:
15787                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15788                 break;
15789         case TG3_EEPROM_SB_REVISION_6:
15790                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15791                 break;
15792         default:
15793                 return;
15794         }
15795
15796         if (tg3_nvram_read(tp, offset, &val))
15797                 return;
15798
15799         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15800                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15801         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15802                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15803         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15804
15805         if (minor > 99 || build > 26)
15806                 return;
15807
15808         offset = strlen(tp->fw_ver);
15809         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15810                  " v%d.%02d", major, minor);
15811
15812         if (build > 0) {
15813                 offset = strlen(tp->fw_ver);
15814                 if (offset < TG3_VER_SIZE - 1)
15815                         tp->fw_ver[offset] = 'a' + build - 1;
15816         }
15817 }
15818
15819 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15820 {
15821         u32 val, offset, start;
15822         int i, vlen;
15823
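        /* Walk the NVRAM directory looking for the ASF init entry. */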
15824         for (offset = TG3_NVM_DIR_START;
15825              offset < TG3_NVM_DIR_END;
15826              offset += TG3_NVM_DIRENT_SIZE) {
15827                 if (tg3_nvram_read(tp, offset, &val))
15828                         return;
15829
15830                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15831                         break;
15832         }
15833
15834         if (offset == TG3_NVM_DIR_END)
15835                 return;
15836
15837         if (!tg3_flag(tp, 5705_PLUS))
15838                 start = 0x08000000;
15839         else if (tg3_nvram_read(tp, offset - 4, &start))
15840                 return;
15841
15842         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15843             !tg3_fw_img_is_valid(tp, offset) ||
15844             tg3_nvram_read(tp, offset + 8, &val))
15845                 return;
15846
15847         offset += val - start;
15848
        vlen = strlen(tp->fw_ver);

        /* Make sure there is room for ", " plus at least one more byte. */
        if (vlen >= TG3_VER_SIZE - 2)
                return;

        tp->fw_ver[vlen++] = ',';
        tp->fw_ver[vlen++] = ' ';
15853
15854         for (i = 0; i < 4; i++) {
15855                 __be32 v;
15856                 if (tg3_nvram_read_be32(tp, offset, &v))
15857                         return;
15858
15859                 offset += sizeof(v);
15860
15861                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15862                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15863                         break;
15864                 }
15865
15866                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15867                 vlen += sizeof(v);
15868         }
15869 }
15870
15871 static void tg3_probe_ncsi(struct tg3 *tp)
15872 {
15873         u32 apedata;
15874
15875         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15876         if (apedata != APE_SEG_SIG_MAGIC)
15877                 return;
15878
15879         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15880         if (!(apedata & APE_FW_STATUS_READY))
15881                 return;
15882
15883         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15884                 tg3_flag_set(tp, APE_HAS_NCSI);
15885 }
15886
15887 static void tg3_read_dash_ver(struct tg3 *tp)
15888 {
15889         int vlen;
15890         u32 apedata;
15891         char *fwtype;
15892
15893         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15894
15895         if (tg3_flag(tp, APE_HAS_NCSI))
15896                 fwtype = "NCSI";
15897         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15898                 fwtype = "SMASH";
15899         else
15900                 fwtype = "DASH";
15901
15902         vlen = strlen(tp->fw_ver);
15903
15904         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15905                  fwtype,
15906                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15907                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15908                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15909                  (apedata & APE_FW_VERSION_BLDMSK));
15910 }
15911
15912 static void tg3_read_otp_ver(struct tg3 *tp)
15913 {
15914         u32 val, val2;
15915
15916         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15917                 return;
15918
15919         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15920             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15921             TG3_OTP_MAGIC0_VALID(val)) {
15922                 u64 val64 = (u64) val << 32 | val2;
15923                 u32 ver = 0;
15924                 int i, vlen;
15925
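                /* Scan the version bytes from the low end and keep the
                 * last non-zero one.
                 */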
15926                 for (i = 0; i < 7; i++) {
15927                         if ((val64 & 0xff) == 0)
15928                                 break;
15929                         ver = val64 & 0xff;
15930                         val64 >>= 8;
15931                 }
15932                 vlen = strlen(tp->fw_ver);
15933                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15934         }
15935 }
15936
15937 static void tg3_read_fw_ver(struct tg3 *tp)
15938 {
15939         u32 val;
15940         bool vpd_vers = false;
15941
15942         if (tp->fw_ver[0] != 0)
15943                 vpd_vers = true;
15944
15945         if (tg3_flag(tp, NO_NVRAM)) {
15946                 strcat(tp->fw_ver, "sb");
15947                 tg3_read_otp_ver(tp);
15948                 return;
15949         }
15950
15951         if (tg3_nvram_read(tp, 0, &val))
15952                 return;
15953
15954         if (val == TG3_EEPROM_MAGIC)
15955                 tg3_read_bc_ver(tp);
15956         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15957                 tg3_read_sb_ver(tp, val);
15958         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15959                 tg3_read_hwsb_ver(tp);
15960
15961         if (tg3_flag(tp, ENABLE_ASF)) {
15962                 if (tg3_flag(tp, ENABLE_APE)) {
15963                         tg3_probe_ncsi(tp);
15964                         if (!vpd_vers)
15965                                 tg3_read_dash_ver(tp);
15966                 } else if (!vpd_vers) {
15967                         tg3_read_mgmtfw_ver(tp);
15968                 }
15969         }
15970
15971         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15972 }
15973
15974 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15975 {
15976         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15977                 return TG3_RX_RET_MAX_SIZE_5717;
15978         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15979                 return TG3_RX_RET_MAX_SIZE_5700;
15980         else
15981                 return TG3_RX_RET_MAX_SIZE_5705;
15982 }
15983
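/* Host bridges known to reorder posted writes.  Systems with one of these
 * get the MBOX_WRITE_REORDER workaround in tg3_get_invariants() below.
 */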
15984 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15985         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15986         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15987         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15988         { },
15989 };
15990
15991 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15992 {
15993         struct pci_dev *peer;
15994         unsigned int func, devnr = tp->pdev->devfn & ~7;
15995
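        /* Scan the other functions in our slot for the mate device. */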
15996         for (func = 0; func < 8; func++) {
15997                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15998                 if (peer && peer != tp->pdev)
15999                         break;
16000                 pci_dev_put(peer);
16001         }
        /* The 5704 can be configured in single-port mode; set peer to
         * tp->pdev in that case.
         */
        if (!peer)
                return tp->pdev;

        /*
         * We don't need to keep the refcount elevated; there's no way
         * to remove one half of this device without removing the other.
         */
16014         pci_dev_put(peer);
16015
16016         return peer;
16017 }
16018
16019 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16020 {
16021         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16022         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16023                 u32 reg;
16024
16025                 /* All devices that use the alternate
16026                  * ASIC REV location have a CPMU.
16027                  */
16028                 tg3_flag_set(tp, CPMU_PRESENT);
16029
16030                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16031                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16032                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16033                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16034                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16035                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16036                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16037                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16038                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16039                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16040                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16041                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16042                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16043                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16044                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16045                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16046                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16047                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16048                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16049                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16050                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16051                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16052                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16053                 else
16054                         reg = TG3PCI_PRODID_ASICREV;
16055
16056                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16057         }
16058
16059         /* Wrong chip ID in 5752 A0. This code can be removed later
16060          * as A0 is not in production.
16061          */
16062         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16063                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16064
16065         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16066                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16067
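        /* Build up the inclusive chip-family flags: 5717_PLUS implies
         * 57765_PLUS, which implies 5755_PLUS, which implies 5750_PLUS,
         * which implies 5705_PLUS.
         */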
16068         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16069             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16070             tg3_asic_rev(tp) == ASIC_REV_5720)
16071                 tg3_flag_set(tp, 5717_PLUS);
16072
16073         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16074             tg3_asic_rev(tp) == ASIC_REV_57766)
16075                 tg3_flag_set(tp, 57765_CLASS);
16076
16077         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16078              tg3_asic_rev(tp) == ASIC_REV_5762)
16079                 tg3_flag_set(tp, 57765_PLUS);
16080
16081         /* Intentionally exclude ASIC_REV_5906 */
16082         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16083             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16084             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16085             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16086             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16087             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16088             tg3_flag(tp, 57765_PLUS))
16089                 tg3_flag_set(tp, 5755_PLUS);
16090
16091         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16092             tg3_asic_rev(tp) == ASIC_REV_5714)
16093                 tg3_flag_set(tp, 5780_CLASS);
16094
16095         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16096             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16097             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16098             tg3_flag(tp, 5755_PLUS) ||
16099             tg3_flag(tp, 5780_CLASS))
16100                 tg3_flag_set(tp, 5750_PLUS);
16101
16102         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16103             tg3_flag(tp, 5750_PLUS))
16104                 tg3_flag_set(tp, 5705_PLUS);
16105 }
16106
16107 static bool tg3_10_100_only_device(struct tg3 *tp,
16108                                    const struct pci_device_id *ent)
16109 {
16110         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16111
16112         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16113              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16114             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16115                 return true;
16116
16117         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16118                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16119                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16120                                 return true;
16121                 } else {
16122                         return true;
16123                 }
16124         }
16125
16126         return false;
16127 }
16128
16129 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16130 {
16131         u32 misc_ctrl_reg;
16132         u32 pci_state_reg, grc_misc_cfg;
16133         u32 val;
16134         u16 pci_cmd;
16135         int err;
16136
16137         /* Force memory write invalidate off.  If we leave it on,
16138          * then on 5700_BX chips we have to enable a workaround.
16139          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
         * to match the cacheline size.  The Broadcom driver has this
         * workaround but turns MWI off all the time, so it never uses
         * it.  This seems to suggest that the workaround is insufficient.
16143          */
16144         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16145         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16146         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16147
16148         /* Important! -- Make sure register accesses are byteswapped
16149          * correctly.  Also, for those chips that require it, make
16150          * sure that indirect register accesses are enabled before
16151          * the first operation.
16152          */
16153         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16154                               &misc_ctrl_reg);
16155         tp->misc_host_ctrl |= (misc_ctrl_reg &
16156                                MISC_HOST_CTRL_CHIPREV);
16157         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16158                                tp->misc_host_ctrl);
16159
16160         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16161
16162         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16163          * we need to disable memory and use config. cycles
16164          * only to access all registers. The 5702/03 chips
16165          * can mistakenly decode the special cycles from the
16166          * ICH chipsets as memory write cycles, causing corruption
16167          * of register and memory space. Only certain ICH bridges
16168          * will drive special cycles with non-zero data during the
16169          * address phase which can fall within the 5703's address
16170          * range. This is not an ICH bug as the PCI spec allows
16171          * non-zero address during special cycles. However, only
16172          * these ICH bridges are known to drive non-zero addresses
16173          * during special cycles.
16174          *
16175          * Since special cycles do not cross PCI bridges, we only
16176          * enable this workaround if the 5703 is on the secondary
16177          * bus of these ICH bridges.
16178          */
16179         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16180             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16181                 static struct tg3_dev_id {
16182                         u32     vendor;
16183                         u32     device;
16184                         u32     rev;
16185                 } ich_chipsets[] = {
16186                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16187                           PCI_ANY_ID },
16188                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16189                           PCI_ANY_ID },
16190                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16191                           0xa },
16192                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16193                           PCI_ANY_ID },
16194                         { },
16195                 };
16196                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16197                 struct pci_dev *bridge = NULL;
16198
16199                 while (pci_id->vendor != 0) {
16200                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16201                                                 bridge);
16202                         if (!bridge) {
16203                                 pci_id++;
16204                                 continue;
16205                         }
16206                         if (pci_id->rev != PCI_ANY_ID) {
16207                                 if (bridge->revision > pci_id->rev)
16208                                         continue;
16209                         }
16210                         if (bridge->subordinate &&
16211                             (bridge->subordinate->number ==
16212                              tp->pdev->bus->number)) {
16213                                 tg3_flag_set(tp, ICH_WORKAROUND);
16214                                 pci_dev_put(bridge);
16215                                 break;
16216                         }
16217                 }
16218         }
16219
16220         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16221                 static struct tg3_dev_id {
16222                         u32     vendor;
16223                         u32     device;
16224                 } bridge_chipsets[] = {
16225                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16226                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16227                         { },
16228                 };
16229                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16230                 struct pci_dev *bridge = NULL;
16231
16232                 while (pci_id->vendor != 0) {
16233                         bridge = pci_get_device(pci_id->vendor,
16234                                                 pci_id->device,
16235                                                 bridge);
16236                         if (!bridge) {
16237                                 pci_id++;
16238                                 continue;
16239                         }
16240                         if (bridge->subordinate &&
16241                             (bridge->subordinate->number <=
16242                              tp->pdev->bus->number) &&
16243                             (bridge->subordinate->busn_res.end >=
16244                              tp->pdev->bus->number)) {
16245                                 tg3_flag_set(tp, 5701_DMA_BUG);
16246                                 pci_dev_put(bridge);
16247                                 break;
16248                         }
16249                 }
16250         }
16251
        /* The EPB bridge inside the 5714, 5715, and 5780 cannot support
         * DMA addresses wider than 40 bits.  This bridge may have other
         * 57xx devices behind it, for example in some 4-port NIC designs.
         * Any tg3 device found behind the bridge will also need the
         * 40-bit DMA workaround.
         */
16258         if (tg3_flag(tp, 5780_CLASS)) {
16259                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16260                 tp->msi_cap = tp->pdev->msi_cap;
16261         } else {
16262                 struct pci_dev *bridge = NULL;
16263
16264                 do {
16265                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16266                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16267                                                 bridge);
16268                         if (bridge && bridge->subordinate &&
16269                             (bridge->subordinate->number <=
16270                              tp->pdev->bus->number) &&
16271                             (bridge->subordinate->busn_res.end >=
16272                              tp->pdev->bus->number)) {
16273                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16274                                 pci_dev_put(bridge);
16275                                 break;
16276                         }
16277                 } while (bridge);
16278         }
16279
16280         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16281             tg3_asic_rev(tp) == ASIC_REV_5714)
16282                 tp->pdev_peer = tg3_find_peer(tp);
16283
16284         /* Determine TSO capabilities */
16285         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16286                 ; /* Do nothing. HW bug. */
16287         else if (tg3_flag(tp, 57765_PLUS))
16288                 tg3_flag_set(tp, HW_TSO_3);
16289         else if (tg3_flag(tp, 5755_PLUS) ||
16290                  tg3_asic_rev(tp) == ASIC_REV_5906)
16291                 tg3_flag_set(tp, HW_TSO_2);
16292         else if (tg3_flag(tp, 5750_PLUS)) {
16293                 tg3_flag_set(tp, HW_TSO_1);
16294                 tg3_flag_set(tp, TSO_BUG);
16295                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16296                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16297                         tg3_flag_clear(tp, TSO_BUG);
16298         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16299                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16300                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16301                 tg3_flag_set(tp, FW_TSO);
16302                 tg3_flag_set(tp, TSO_BUG);
16303                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16304                         tp->fw_needed = FIRMWARE_TG3TSO5;
16305                 else
16306                         tp->fw_needed = FIRMWARE_TG3TSO;
16307         }
16308
16309         /* Selectively allow TSO based on operating conditions */
16310         if (tg3_flag(tp, HW_TSO_1) ||
16311             tg3_flag(tp, HW_TSO_2) ||
16312             tg3_flag(tp, HW_TSO_3) ||
16313             tg3_flag(tp, FW_TSO)) {
16314                 /* For firmware TSO, assume ASF is disabled.
16315                  * We'll disable TSO later if we discover ASF
16316                  * is enabled in tg3_get_eeprom_hw_cfg().
16317                  */
16318                 tg3_flag_set(tp, TSO_CAPABLE);
16319         } else {
16320                 tg3_flag_clear(tp, TSO_CAPABLE);
16321                 tg3_flag_clear(tp, TSO_BUG);
16322                 tp->fw_needed = NULL;
16323         }
16324
16325         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16326                 tp->fw_needed = FIRMWARE_TG3;
16327
16328         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16329                 tp->fw_needed = FIRMWARE_TG357766;
16330
16331         tp->irq_max = 1;
16332
16333         if (tg3_flag(tp, 5750_PLUS)) {
16334                 tg3_flag_set(tp, SUPPORT_MSI);
16335                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16336                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16337                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16338                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16339                      tp->pdev_peer == tp->pdev))
16340                         tg3_flag_clear(tp, SUPPORT_MSI);
16341
16342                 if (tg3_flag(tp, 5755_PLUS) ||
16343                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16344                         tg3_flag_set(tp, 1SHOT_MSI);
16345                 }
16346
16347                 if (tg3_flag(tp, 57765_PLUS)) {
16348                         tg3_flag_set(tp, SUPPORT_MSIX);
16349                         tp->irq_max = TG3_IRQ_MAX_VECS;
16350                 }
16351         }
16352
16353         tp->txq_max = 1;
16354         tp->rxq_max = 1;
16355         if (tp->irq_max > 1) {
16356                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16357                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16358
16359                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16360                     tg3_asic_rev(tp) == ASIC_REV_5720)
16361                         tp->txq_max = tp->irq_max - 1;
16362         }
16363
16364         if (tg3_flag(tp, 5755_PLUS) ||
16365             tg3_asic_rev(tp) == ASIC_REV_5906)
16366                 tg3_flag_set(tp, SHORT_DMA_BUG);
16367
16368         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16369                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16370
16371         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16372             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16373             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16374             tg3_asic_rev(tp) == ASIC_REV_5762)
16375                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16376
16377         if (tg3_flag(tp, 57765_PLUS) &&
16378             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16379                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16380
16381         if (!tg3_flag(tp, 5705_PLUS) ||
16382             tg3_flag(tp, 5780_CLASS) ||
16383             tg3_flag(tp, USE_JUMBO_BDFLAG))
16384                 tg3_flag_set(tp, JUMBO_CAPABLE);
16385
16386         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16387                               &pci_state_reg);
16388
16389         if (pci_is_pcie(tp->pdev)) {
16390                 u16 lnkctl;
16391
16392                 tg3_flag_set(tp, PCI_EXPRESS);
16393
16394                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16395                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16396                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16397                                 tg3_flag_clear(tp, HW_TSO_2);
16398                                 tg3_flag_clear(tp, TSO_CAPABLE);
16399                         }
16400                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16401                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16402                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16403                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16404                                 tg3_flag_set(tp, CLKREQ_BUG);
16405                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16406                         tg3_flag_set(tp, L1PLLPD_EN);
16407                 }
16408         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16409                 /* BCM5785 devices are effectively PCIe devices, and should
16410                  * follow PCIe codepaths, but do not have a PCIe capabilities
16411                  * section.
16412                  */
16413                 tg3_flag_set(tp, PCI_EXPRESS);
16414         } else if (!tg3_flag(tp, 5705_PLUS) ||
16415                    tg3_flag(tp, 5780_CLASS)) {
16416                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16417                 if (!tp->pcix_cap) {
16418                         dev_err(&tp->pdev->dev,
16419                                 "Cannot find PCI-X capability, aborting\n");
16420                         return -EIO;
16421                 }
16422
16423                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16424                         tg3_flag_set(tp, PCIX_MODE);
16425         }
16426
        /* If we have an AMD 762 or VIA K8T800 chipset, write
         * reordering of mailbox register writes by the host
         * controller can cause serious problems.  We read back from
         * every mailbox register write to force the writes to be
         * posted to the chip in order.
         */
16433         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16434             !tg3_flag(tp, PCI_EXPRESS))
16435                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16436
16437         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16438                              &tp->pci_cacheline_sz);
16439         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16440                              &tp->pci_lat_timer);
16441         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16442             tp->pci_lat_timer < 64) {
16443                 tp->pci_lat_timer = 64;
16444                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16445                                       tp->pci_lat_timer);
16446         }
16447
16448         /* Important! -- It is critical that the PCI-X hw workaround
16449          * situation is decided before the first MMIO register access.
16450          */
16451         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16452                 /* 5700 BX chips need to have their TX producer index
                 * mailboxes written twice to work around a bug.
16454                  */
16455                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16456
                /* If we are in PCI-X mode, enable the register write workaround.
16458                  *
16459                  * The workaround is to use indirect register accesses
16460                  * for all chip writes not to mailbox registers.
16461                  */
16462                 if (tg3_flag(tp, PCIX_MODE)) {
16463                         u32 pm_reg;
16464
16465                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16466
                        /* The chip can have its power management PCI config
16468                          * space registers clobbered due to this bug.
16469                          * So explicitly force the chip into D0 here.
16470                          */
16471                         pci_read_config_dword(tp->pdev,
16472                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16473                                               &pm_reg);
16474                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16475                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16476                         pci_write_config_dword(tp->pdev,
16477                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16478                                                pm_reg);
16479
16480                         /* Also, force SERR#/PERR# in PCI command. */
16481                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16482                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16483                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16484                 }
16485         }
16486
16487         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16488                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16489         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16490                 tg3_flag_set(tp, PCI_32BIT);
16491
16492         /* Chip-specific fixup from Broadcom driver */
16493         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16494             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16495                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16496                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16497         }
16498
16499         /* Default fast path register access methods */
16500         tp->read32 = tg3_read32;
16501         tp->write32 = tg3_write32;
16502         tp->read32_mbox = tg3_read32;
16503         tp->write32_mbox = tg3_write32;
16504         tp->write32_tx_mbox = tg3_write32;
16505         tp->write32_rx_mbox = tg3_write32;
16506
16507         /* Various workaround register access methods */
16508         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16509                 tp->write32 = tg3_write_indirect_reg32;
16510         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16511                  (tg3_flag(tp, PCI_EXPRESS) &&
16512                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16513                 /*
16514                  * Back to back register writes can cause problems on these
16515                  * chips, the workaround is to read back all reg writes
16516                  * except those to mailbox regs.
16517                  *
16518                  * See tg3_write_indirect_reg32().
16519                  */
16520                 tp->write32 = tg3_write_flush_reg32;
16521         }
16522
16523         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16524                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16525                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16526                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16527         }
16528
16529         if (tg3_flag(tp, ICH_WORKAROUND)) {
16530                 tp->read32 = tg3_read_indirect_reg32;
16531                 tp->write32 = tg3_write_indirect_reg32;
16532                 tp->read32_mbox = tg3_read_indirect_mbox;
16533                 tp->write32_mbox = tg3_write_indirect_mbox;
16534                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16535                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16536
16537                 iounmap(tp->regs);
16538                 tp->regs = NULL;
16539
16540                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16541                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16542                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16543         }
16544         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16545                 tp->read32_mbox = tg3_read32_mbox_5906;
16546                 tp->write32_mbox = tg3_write32_mbox_5906;
16547                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16548                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16549         }
16550
16551         if (tp->write32 == tg3_write_indirect_reg32 ||
16552             (tg3_flag(tp, PCIX_MODE) &&
16553              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16554               tg3_asic_rev(tp) == ASIC_REV_5701)))
16555                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16556
16557         /* The memory arbiter has to be enabled in order for SRAM accesses
16558          * to succeed.  Normally on powerup the tg3 chip firmware will make
16559          * sure it is enabled, but other entities such as system netboot
16560          * code might disable it.
16561          */
16562         val = tr32(MEMARB_MODE);
16563         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16564
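        /* Determine which PCI function this device is.  On the chips
         * handled below, devfn may not reflect the actual function, so
         * read it from the PCI-X status or CPMU status register instead.
         */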
16565         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16566         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16567             tg3_flag(tp, 5780_CLASS)) {
16568                 if (tg3_flag(tp, PCIX_MODE)) {
16569                         pci_read_config_dword(tp->pdev,
16570                                               tp->pcix_cap + PCI_X_STATUS,
16571                                               &val);
16572                         tp->pci_fn = val & 0x7;
16573                 }
16574         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16575                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16576                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16577                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16578                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16579                         val = tr32(TG3_CPMU_STATUS);
16580
16581                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16582                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16583                 else
16584                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16585                                      TG3_CPMU_STATUS_FSHFT_5719;
16586         }
16587
16588         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16589                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16590                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16591         }
16592
16593         /* Get eeprom hw config before calling tg3_set_power_state().
16594          * In particular, the TG3_FLAG_IS_NIC flag must be
16595          * determined before calling tg3_set_power_state() so that
16596          * we know whether or not to switch out of Vaux power.
16597          * When the flag is set, it means that GPIO1 is used for eeprom
16598          * write protect and also implies that it is a LOM where GPIOs
16599          * are not used to switch power.
16600          */
16601         tg3_get_eeprom_hw_cfg(tp);
16602
16603         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16604                 tg3_flag_clear(tp, TSO_CAPABLE);
16605                 tg3_flag_clear(tp, TSO_BUG);
16606                 tp->fw_needed = NULL;
16607         }
16608
16609         if (tg3_flag(tp, ENABLE_APE)) {
16610                 /* Allow reads and writes to the
16611                  * APE register and memory space.
16612                  */
16613                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16614                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16615                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16616                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16617                                        pci_state_reg);
16618
16619                 tg3_ape_lock_init(tp);
16620         }
16621
16622         /* Set up tp->grc_local_ctrl before calling
16623          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16624          * will bring 5700's external PHY out of reset.
16625          * It is also used as eeprom write protect on LOMs.
16626          */
16627         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16628         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16629             tg3_flag(tp, EEPROM_WRITE_PROT))
16630                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16631                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16632         /* Unused GPIO3 must be driven as output on 5752 because there
16633          * are no pull-up resistors on unused GPIO pins.
16634          */
16635         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16636                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16637
16638         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16639             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16640             tg3_flag(tp, 57765_CLASS))
16641                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16642
16643         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16644             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16645                 /* Turn off the debug UART. */
16646                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16647                 if (tg3_flag(tp, IS_NIC))
16648                         /* Keep VMain power. */
16649                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16650                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16651         }
16652
16653         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16654                 tp->grc_local_ctrl |=
16655                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16656
16657         /* Switch out of Vaux if it is a NIC */
16658         tg3_pwrsrc_switch_to_vmain(tp);
16659
16660         /* Derive initial jumbo mode from MTU assigned in
16661          * ether_setup() via the alloc_etherdev() call
16662          */
16663         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16664                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16665
16666         /* Determine Wake-on-LAN speed to use. */
16667         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16668             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16669             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16670             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16671                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16672         } else {
16673                 tg3_flag_set(tp, WOL_SPEED_100MB);
16674         }
16675
16676         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16677                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16678
16679         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16680         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16681             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16682              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16683              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16684             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16685             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16686                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16687
16688         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16689             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16690                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16691         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16692                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16693
16694         if (tg3_flag(tp, 5705_PLUS) &&
16695             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16696             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16697             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16698             !tg3_flag(tp, 57765_PLUS)) {
16699                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16700                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16701                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16702                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16703                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16704                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16705                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16706                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16707                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16708                 } else
16709                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16710         }
16711
16712         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16713             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16714                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16715                 if (tp->phy_otp == 0)
16716                         tp->phy_otp = TG3_OTP_DEFAULT;
16717         }
16718
16719         if (tg3_flag(tp, CPMU_PRESENT))
16720                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16721         else
16722                 tp->mi_mode = MAC_MI_MODE_BASE;
16723
16724         tp->coalesce_mode = 0;
16725         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16726             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16727                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16728
16729         /* Set these bits to enable statistics workaround. */
16730         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16731             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16732             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16733             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16734                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16735                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16736         }
16737
16738         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16739             tg3_asic_rev(tp) == ASIC_REV_57780)
16740                 tg3_flag_set(tp, USE_PHYLIB);
16741
16742         err = tg3_mdio_init(tp);
16743         if (err)
16744                 return err;
16745
16746         /* Initialize data/descriptor byte/word swapping. */
16747         val = tr32(GRC_MODE);
16748         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16749             tg3_asic_rev(tp) == ASIC_REV_5762)
16750                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16751                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16752                         GRC_MODE_B2HRX_ENABLE |
16753                         GRC_MODE_HTX2B_ENABLE |
16754                         GRC_MODE_HOST_STACKUP);
16755         else
16756                 val &= GRC_MODE_HOST_STACKUP;
16757
16758         tw32(GRC_MODE, val | tp->grc_mode);
16759
16760         tg3_switch_clocks(tp);
16761
16762         /* Clear this out for sanity. */
16763         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16764
16765         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16766         tw32(TG3PCI_REG_BASE_ADDR, 0);
16767
16768         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16769                               &pci_state_reg);
16770         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16771             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16772                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16773                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16774                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16775                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16776                         void __iomem *sram_base;
16777
16778                         /* Write some dummy words into the SRAM status block
16779                          * area and see if they read back correctly.  If the
16780                          * readback is bad, force-enable the PCIX workaround.
16781                          */
16782                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16783
16784                         writel(0x00000000, sram_base);
16785                         writel(0x00000000, sram_base + 4);
16786                         writel(0xffffffff, sram_base + 4);
16787                         if (readl(sram_base) != 0x00000000)
16788                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16789                 }
16790         }
16791
16792         udelay(50);
16793         tg3_nvram_init(tp);
16794
16795         /* If the device has an NVRAM, no need to load patch firmware */
16796         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16797             !tg3_flag(tp, NO_NVRAM))
16798                 tp->fw_needed = NULL;
16799
16800         grc_misc_cfg = tr32(GRC_MISC_CFG);
16801         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16802
16803         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16804             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16805              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16806                 tg3_flag_set(tp, IS_5788);
16807
16808         if (!tg3_flag(tp, IS_5788) &&
16809             tg3_asic_rev(tp) != ASIC_REV_5700)
16810                 tg3_flag_set(tp, TAGGED_STATUS);
16811         if (tg3_flag(tp, TAGGED_STATUS)) {
16812                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16813                                       HOSTCC_MODE_CLRTICK_TXBD);
16814
16815                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16816                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16817                                        tp->misc_host_ctrl);
16818         }
16819
16820         /* Preserve the APE MAC_MODE bits */
16821         if (tg3_flag(tp, ENABLE_APE))
16822                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16823         else
16824                 tp->mac_mode = 0;
16825
16826         if (tg3_10_100_only_device(tp, ent))
16827                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16828
16829         err = tg3_phy_probe(tp);
16830         if (err) {
16831                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16832                 /* ... but do not return immediately; tear down MDIO now
16832                  * and report err at the end of the function ...
16832                  */
16833                 tg3_mdio_fini(tp);
16834         }
16835
16836         tg3_read_vpd(tp);
16837         tg3_read_fw_ver(tp);
16838
16839         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16840                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16841         } else {
16842                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16843                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16844                 else
16845                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16846         }
16847
16848         /* 5700 {AX,BX} chips have a broken status block link
16849          * change bit implementation, so we must use the
16850          * status register in those cases.
16851          */
16852         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16853                 tg3_flag_set(tp, USE_LINKCHG_REG);
16854         else
16855                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16856
16857         /* The led_ctrl is set during tg3_phy_probe; here we might
16858          * have to force the link status polling mechanism based
16859          * upon subsystem IDs.
16860          */
16861         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16862             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16863             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16864                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16865                 tg3_flag_set(tp, USE_LINKCHG_REG);
16866         }
16867
16868         /* For all SERDES we poll the MAC status register. */
16869         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16870                 tg3_flag_set(tp, POLL_SERDES);
16871         else
16872                 tg3_flag_clear(tp, POLL_SERDES);
16873
16874         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16875                 tg3_flag_set(tp, POLL_CPMU_LINK);
16876
16877         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16878         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
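        /* The 5701 in PCIX mode apparently cannot use the usual 2-byte
         * NET_IP_ALIGN rx offset, so drop it; on platforms without
         * efficient unaligned access, set the copy threshold to ~0 so
         * every packet is copied (and thus realigned) on receive.
         */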
16879         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16880             tg3_flag(tp, PCIX_MODE)) {
16881                 tp->rx_offset = NET_SKB_PAD;
16882 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16883                 tp->rx_copy_thresh = ~(u16)0;
16884 #endif
16885         }
16886
16887         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16888         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16889         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16890
16891         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16892
16893         /* Increment the rx prod index on the rx std ring by at most
16894          * 8 for these chips to work around hw errata.
16895          */
16896         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16897             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16898             tg3_asic_rev(tp) == ASIC_REV_5755)
16899                 tp->rx_std_max_post = 8;
16900
16901         if (tg3_flag(tp, ASPM_WORKAROUND))
16902                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16903                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16904
16905         return err;
16906 }
16907
16908 #ifdef CONFIG_SPARC
16909 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16910 {
16911         struct net_device *dev = tp->dev;
16912         struct pci_dev *pdev = tp->pdev;
16913         struct device_node *dp = pci_device_to_OF_node(pdev);
16914         const unsigned char *addr;
16915         int len;
16916
16917         addr = of_get_property(dp, "local-mac-address", &len);
16918         if (addr && len == ETH_ALEN) {
16919                 memcpy(dev->dev_addr, addr, ETH_ALEN);
16920                 return 0;
16921         }
16922         return -ENODEV;
16923 }
16924
16925 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16926 {
16927         struct net_device *dev = tp->dev;
16928
16929         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16930         return 0;
16931 }
16932 #endif
16933
16934 static int tg3_get_device_address(struct tg3 *tp)
16935 {
16936         struct net_device *dev = tp->dev;
16937         u32 hi, lo, mac_offset;
16938         int addr_ok = 0;
16939         int err;
16940
16941 #ifdef CONFIG_SPARC
16942         if (!tg3_get_macaddr_sparc(tp))
16943                 return 0;
16944 #endif
16945
16946         if (tg3_flag(tp, IS_SSB_CORE)) {
16947                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16948                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16949                         return 0;
16950         }
16951
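        /* 0x7c is the usual NVRAM offset of the MAC address.  Dual-MAC
         * parts (5704/5780 class) and multi-function 5717+ devices keep
         * per-function copies at other offsets, and the 5906 uses 0x10.
         */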
16952         mac_offset = 0x7c;
16953         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16954             tg3_flag(tp, 5780_CLASS)) {
16955                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16956                         mac_offset = 0xcc;
16957                 if (tg3_nvram_lock(tp))
16958                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16959                 else
16960                         tg3_nvram_unlock(tp);
16961         } else if (tg3_flag(tp, 5717_PLUS)) {
16962                 if (tp->pci_fn & 1)
16963                         mac_offset = 0xcc;
16964                 if (tp->pci_fn > 1)
16965                         mac_offset += 0x18c;
16966         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16967                 mac_offset = 0x10;
16968
16969         /* First try to get it from MAC address mailbox. */
16970         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
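        /* 0x484b is ASCII "HK" (0x48 == 'H', 0x4b == 'K'), which the
         * bootcode appears to use as a "MAC address valid" signature;
         * only trust the mailbox bytes when it is present.
         */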
16971         if ((hi >> 16) == 0x484b) {
16972                 dev->dev_addr[0] = (hi >>  8) & 0xff;
16973                 dev->dev_addr[1] = (hi >>  0) & 0xff;
16974
16975                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16976                 dev->dev_addr[2] = (lo >> 24) & 0xff;
16977                 dev->dev_addr[3] = (lo >> 16) & 0xff;
16978                 dev->dev_addr[4] = (lo >>  8) & 0xff;
16979                 dev->dev_addr[5] = (lo >>  0) & 0xff;
16980
16981                 /* Some old bootcode may report a 0 MAC address in SRAM */
16982                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16983         }
16984         if (!addr_ok) {
16985                 /* Next, try NVRAM. */
16986                 if (!tg3_flag(tp, NO_NVRAM) &&
16987                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16988                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16989                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16990                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16991                 }
16992                 /* Finally just fetch it out of the MAC control regs. */
16993                 else {
16994                         hi = tr32(MAC_ADDR_0_HIGH);
16995                         lo = tr32(MAC_ADDR_0_LOW);
16996
16997                         dev->dev_addr[5] = lo & 0xff;
16998                         dev->dev_addr[4] = (lo >> 8) & 0xff;
16999                         dev->dev_addr[3] = (lo >> 16) & 0xff;
17000                         dev->dev_addr[2] = (lo >> 24) & 0xff;
17001                         dev->dev_addr[1] = hi & 0xff;
17002                         dev->dev_addr[0] = (hi >> 8) & 0xff;
17003                 }
17004         }
17005
17006         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17007 #ifdef CONFIG_SPARC
17008                 if (!tg3_get_default_macaddr_sparc(tp))
17009                         return 0;
17010 #endif
17011                 return -EINVAL;
17012         }
17013         return 0;
17014 }
17015
17016 #define BOUNDARY_SINGLE_CACHELINE       1
17017 #define BOUNDARY_MULTI_CACHELINE        2
17018
17019 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17020 {
17021         int cacheline_size;
17022         u8 byte;
17023         int goal;
17024
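        /* PCI_CACHE_LINE_SIZE is specified in 32-bit words, hence the
         * multiply by 4 below (e.g. a register value of 0x10 means
         * 16 words = 64 bytes).  A value of zero usually means firmware
         * never programmed the register, so assume 1024 bytes.
         */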
17025         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17026         if (byte == 0)
17027                 cacheline_size = 1024;
17028         else
17029                 cacheline_size = (int) byte * 4;
17030
17031         /* On 5703 and later chips, the boundary bits have no
17032          * effect.
17033          */
17034         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17035             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17036             !tg3_flag(tp, PCI_EXPRESS))
17037                 goto out;
17038
17039 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17040         goal = BOUNDARY_MULTI_CACHELINE;
17041 #else
17042 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17043         goal = BOUNDARY_SINGLE_CACHELINE;
17044 #else
17045         goal = 0;
17046 #endif
17047 #endif
17048
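        /* 57765+ parts only expose a single "disable cache alignment"
         * control rather than the old read/write boundary fields, so
         * set or clear that one bit and return.
         */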
17049         if (tg3_flag(tp, 57765_PLUS)) {
17050                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17051                 goto out;
17052         }
17053
17054         if (!goal)
17055                 goto out;
17056
17057         /* PCI controllers on most RISC systems tend to disconnect
17058          * when a device tries to burst across a cache-line boundary.
17059          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17060          *
17061          * Unfortunately, for PCI-E there are only limited
17062          * write-side controls for this, and thus for reads
17063          * we will still get the disconnects.  We'll also waste
17064          * these PCI cycles for both read and write on chips
17065          * other than 5700 and 5701, which are the only ones that
17066          * implement the boundary bits.
17067          */
17068         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17069                 switch (cacheline_size) {
17070                 case 16:
17071                 case 32:
17072                 case 64:
17073                 case 128:
17074                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17075                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17076                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17077                         } else {
17078                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17079                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17080                         }
17081                         break;
17082
17083                 case 256:
17084                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17085                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17086                         break;
17087
17088                 default:
17089                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17090                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17091                         break;
17092                 }
17093         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17094                 switch (cacheline_size) {
17095                 case 16:
17096                 case 32:
17097                 case 64:
17098                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17099                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17100                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17101                                 break;
17102                         }
17103                         /* fallthrough */
17104                 case 128:
17105                 default:
17106                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17107                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17108                         break;
17109                 }
17110         } else {
17111                 switch (cacheline_size) {
17112                 case 16:
17113                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17114                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17115                                         DMA_RWCTRL_WRITE_BNDRY_16);
17116                                 break;
17117                         }
17118                         /* fallthrough */
17119                 case 32:
17120                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17121                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17122                                         DMA_RWCTRL_WRITE_BNDRY_32);
17123                                 break;
17124                         }
17125                         /* fallthrough */
17126                 case 64:
17127                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17128                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17129                                         DMA_RWCTRL_WRITE_BNDRY_64);
17130                                 break;
17131                         }
17132                         /* fallthrough */
17133                 case 128:
17134                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17135                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17136                                         DMA_RWCTRL_WRITE_BNDRY_128);
17137                                 break;
17138                         }
17139                         /* fallthrough */
17140                 case 256:
17141                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17142                                 DMA_RWCTRL_WRITE_BNDRY_256);
17143                         break;
17144                 case 512:
17145                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17146                                 DMA_RWCTRL_WRITE_BNDRY_512);
17147                         break;
17148                 case 1024:
17149                 default:
17150                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17151                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17152                         break;
17153                 }
17154         }
17155
17156 out:
17157         return val;
17158 }
17159
17160 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17161                            int size, bool to_device)
17162 {
17163         struct tg3_internal_buffer_desc test_desc;
17164         u32 sram_dma_descs;
17165         int i, ret;
17166
17167         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17168
17169         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17170         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17171         tw32(RDMAC_STATUS, 0);
17172         tw32(WDMAC_STATUS, 0);
17173
17174         tw32(BUFMGR_MODE, 0);
17175         tw32(FTQ_RESET, 0);
17176
17177         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17178         test_desc.addr_lo = buf_dma & 0xffffffff;
17179         test_desc.nic_mbuf = 0x00002100;
17180         test_desc.len = size;
17181
17182         /*
17183          * The HP ZX1 was seeing test failures for 5701 cards running
17184          * at 33MHz the *second* time the tg3 driver was loaded after
17185          * an initial scan.
17186          *
17187          * Broadcom tells me:
17188          *   ...the DMA engine is connected to the GRC block and a DMA
17189          *   reset may affect the GRC block in some unpredictable way...
17190          *   The behavior of resets to individual blocks has not been tested.
17191          *
17192          * Broadcom noted the GRC reset will also reset all sub-components.
17193          */
17194         if (to_device) {
17195                 test_desc.cqid_sqid = (13 << 8) | 2;
17196
17197                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17198                 udelay(40);
17199         } else {
17200                 test_desc.cqid_sqid = (16 << 8) | 7;
17201
17202                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17203                 udelay(40);
17204         }
17205         test_desc.flags = 0x00000005;
17206
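        /* Copy the descriptor into NIC SRAM one 32-bit word at a time
         * through the indirect memory window in PCI config space, then
         * close the window again.
         */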
17207         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17208                 u32 val;
17209
17210                 val = *(((u32 *)&test_desc) + i);
17211                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17212                                        sram_dma_descs + (i * sizeof(u32)));
17213                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17214         }
17215         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17216
17217         if (to_device)
17218                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17219         else
17220                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17221
17222         ret = -ENODEV;
17223         for (i = 0; i < 40; i++) {
17224                 u32 val;
17225
17226                 if (to_device)
17227                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17228                 else
17229                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17230                 if ((val & 0xffff) == sram_dma_descs) {
17231                         ret = 0;
17232                         break;
17233                 }
17234
17235                 udelay(100);
17236         }
17237
17238         return ret;
17239 }
17240
17241 #define TEST_BUFFER_SIZE        0x2000
17242
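/* Host bridges in this table are known to trip the 5700/5701 write DMA
 * bug even when the DMA test below passes, so the 16-byte write boundary
 * workaround is forced whenever one is present.
 */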
17243 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17244         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17245         { },
17246 };
17247
17248 static int tg3_test_dma(struct tg3 *tp)
17249 {
17250         dma_addr_t buf_dma;
17251         u32 *buf, saved_dma_rwctrl;
17252         int ret = 0;
17253
17254         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17255                                  &buf_dma, GFP_KERNEL);
17256         if (!buf) {
17257                 ret = -ENOMEM;
17258                 goto out_nofree;
17259         }
17260
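        /* Start from the default PCI read/write command codes and let
         * tg3_calc_dma_bndry() fold in any cache-line boundary bits the
         * chip and platform call for.
         */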
17261         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17262                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17263
17264         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17265
17266         if (tg3_flag(tp, 57765_PLUS))
17267                 goto out;
17268
17269         if (tg3_flag(tp, PCI_EXPRESS)) {
17270                 /* DMA read watermark not used on PCIE */
17271                 tp->dma_rwctrl |= 0x00180000;
17272         } else if (!tg3_flag(tp, PCIX_MODE)) {
17273                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17274                     tg3_asic_rev(tp) == ASIC_REV_5750)
17275                         tp->dma_rwctrl |= 0x003f0000;
17276                 else
17277                         tp->dma_rwctrl |= 0x003f000f;
17278         } else {
17279                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17280                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17281                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17282                         u32 read_water = 0x7;
17283
17284                         /* If the 5704 is behind the EPB bridge, we can
17285                          * do the less restrictive ONE_DMA workaround for
17286                          * better performance.
17287                          */
17288                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17289                             tg3_asic_rev(tp) == ASIC_REV_5704)
17290                                 tp->dma_rwctrl |= 0x8000;
17291                         else if (ccval == 0x6 || ccval == 0x7)
17292                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17293
17294                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17295                                 read_water = 4;
17296                         /* Set bit 23 to enable PCIX hw bug fix */
17297                         tp->dma_rwctrl |=
17298                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17299                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17300                                 (1 << 23);
17301                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17302                         /* 5780 always in PCIX mode */
17303                         tp->dma_rwctrl |= 0x00144000;
17304                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17305                         /* 5714 always in PCIX mode */
17306                         tp->dma_rwctrl |= 0x00148000;
17307                 } else {
17308                         tp->dma_rwctrl |= 0x001b000f;
17309                 }
17310         }
17311         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17312                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17313
17314         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17315             tg3_asic_rev(tp) == ASIC_REV_5704)
17316                 tp->dma_rwctrl &= 0xfffffff0;
17317
17318         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17319             tg3_asic_rev(tp) == ASIC_REV_5701) {
17320                 /* Remove this if it causes problems for some boards. */
17321                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17322
17323                 /* On 5700/5701 chips, we need to set this bit.
17324                  * Otherwise the chip will issue cacheline transactions
17325                  * to streamable DMA memory with not all the byte
17326                  * enables turned on.  This is an error on several
17327                  * RISC PCI controllers, in particular sparc64.
17328                  *
17329                  * On 5703/5704 chips, this bit has been reassigned
17330                  * a different meaning.  In particular, it is used
17331                  * on those chips to enable a PCI-X workaround.
17332                  */
17333                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17334         }
17335
17336         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17337
17338
17339         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17340             tg3_asic_rev(tp) != ASIC_REV_5701)
17341                 goto out;
17342
17343         /* It is best to perform the DMA test with the maximum write
17344          * burst size to expose the 5700/5701 write DMA bug.
17345          */
17346         saved_dma_rwctrl = tp->dma_rwctrl;
17347         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17348         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17349
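        /* Round-trip test: fill the buffer with a known pattern, DMA it
         * to the chip, DMA it back, and compare.  On corruption, retry
         * once with the 16-byte write boundary workaround before giving
         * up.
         */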
17350         while (1) {
17351                 u32 *p = buf, i;
17352
17353                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17354                         p[i] = i;
17355
17356                 /* Send the buffer to the chip. */
17357                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17358                 if (ret) {
17359                         dev_err(&tp->pdev->dev,
17360                                 "%s: Buffer write failed. err = %d\n",
17361                                 __func__, ret);
17362                         break;
17363                 }
17364
17365                 /* Now read it back. */
17366                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17367                 if (ret) {
17368                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17369                                 "err = %d\n", __func__, ret);
17370                         break;
17371                 }
17372
17373                 /* Verify it. */
17374                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17375                         if (p[i] == i)
17376                                 continue;
17377
17378                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17379                             DMA_RWCTRL_WRITE_BNDRY_16) {
17380                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17381                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17382                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17383                                 break;
17384                         } else {
17385                                 dev_err(&tp->pdev->dev,
17386                                         "%s: Buffer corrupted on read back! "
17387                                         "(%d != %d)\n", __func__, p[i], i);
17388                                 ret = -ENODEV;
17389                                 goto out;
17390                         }
17391                 }
17392
17393                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17394                         /* Success. */
17395                         ret = 0;
17396                         break;
17397                 }
17398         }
17399         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17400             DMA_RWCTRL_WRITE_BNDRY_16) {
17401                 /* DMA test passed without adjusting the DMA boundary;
17402                  * now look for chipsets that are known to expose the
17403                  * DMA bug without failing the test.
17404                  */
17405                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17406                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17407                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17408                 } else {
17409                         /* Safe to use the calculated DMA boundary. */
17410                         tp->dma_rwctrl = saved_dma_rwctrl;
17411                 }
17412
17413                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17414         }
17415
17416 out:
17417         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17418 out_nofree:
17419         return ret;
17420 }
17421
17422 static void tg3_init_bufmgr_config(struct tg3 *tp)
17423 {
17424         if (tg3_flag(tp, 57765_PLUS)) {
17425                 tp->bufmgr_config.mbuf_read_dma_low_water =
17426                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17427                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17428                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17429                 tp->bufmgr_config.mbuf_high_water =
17430                         DEFAULT_MB_HIGH_WATER_57765;
17431
17432                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17433                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17434                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17435                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17436                 tp->bufmgr_config.mbuf_high_water_jumbo =
17437                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17438         } else if (tg3_flag(tp, 5705_PLUS)) {
17439                 tp->bufmgr_config.mbuf_read_dma_low_water =
17440                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17441                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17442                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17443                 tp->bufmgr_config.mbuf_high_water =
17444                         DEFAULT_MB_HIGH_WATER_5705;
17445                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17446                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17447                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17448                         tp->bufmgr_config.mbuf_high_water =
17449                                 DEFAULT_MB_HIGH_WATER_5906;
17450                 }
17451
17452                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17453                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17454                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17455                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17456                 tp->bufmgr_config.mbuf_high_water_jumbo =
17457                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17458         } else {
17459                 tp->bufmgr_config.mbuf_read_dma_low_water =
17460                         DEFAULT_MB_RDMA_LOW_WATER;
17461                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17462                         DEFAULT_MB_MACRX_LOW_WATER;
17463                 tp->bufmgr_config.mbuf_high_water =
17464                         DEFAULT_MB_HIGH_WATER;
17465
17466                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17467                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17468                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17469                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17470                 tp->bufmgr_config.mbuf_high_water_jumbo =
17471                         DEFAULT_MB_HIGH_WATER_JUMBO;
17472         }
17473
17474         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17475         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17476 }
17477
17478 static char *tg3_phy_string(struct tg3 *tp)
17479 {
17480         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17481         case TG3_PHY_ID_BCM5400:        return "5400";
17482         case TG3_PHY_ID_BCM5401:        return "5401";
17483         case TG3_PHY_ID_BCM5411:        return "5411";
17484         case TG3_PHY_ID_BCM5701:        return "5701";
17485         case TG3_PHY_ID_BCM5703:        return "5703";
17486         case TG3_PHY_ID_BCM5704:        return "5704";
17487         case TG3_PHY_ID_BCM5705:        return "5705";
17488         case TG3_PHY_ID_BCM5750:        return "5750";
17489         case TG3_PHY_ID_BCM5752:        return "5752";
17490         case TG3_PHY_ID_BCM5714:        return "5714";
17491         case TG3_PHY_ID_BCM5780:        return "5780";
17492         case TG3_PHY_ID_BCM5755:        return "5755";
17493         case TG3_PHY_ID_BCM5787:        return "5787";
17494         case TG3_PHY_ID_BCM5784:        return "5784";
17495         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17496         case TG3_PHY_ID_BCM5906:        return "5906";
17497         case TG3_PHY_ID_BCM5761:        return "5761";
17498         case TG3_PHY_ID_BCM5718C:       return "5718C";
17499         case TG3_PHY_ID_BCM5718S:       return "5718S";
17500         case TG3_PHY_ID_BCM57765:       return "57765";
17501         case TG3_PHY_ID_BCM5719C:       return "5719C";
17502         case TG3_PHY_ID_BCM5720C:       return "5720C";
17503         case TG3_PHY_ID_BCM5762:        return "5762C";
17504         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17505         case 0:                 return "serdes";
17506         default:                return "unknown";
17507         }
17508 }
17509
17510 static char *tg3_bus_string(struct tg3 *tp, char *str)
17511 {
17512         if (tg3_flag(tp, PCI_EXPRESS)) {
17513                 strcpy(str, "PCI Express");
17514                 return str;
17515         } else if (tg3_flag(tp, PCIX_MODE)) {
17516                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17517
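                /* The low five bits of CLOCK_CTRL appear to latch the
                 * PCIX bus speed sampled at reset; decode the known
                 * values, appending no suffix for anything else.
                 */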
17518                 strcpy(str, "PCIX:");
17519
17520                 if ((clock_ctrl == 7) ||
17521                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17522                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17523                         strcat(str, "133MHz");
17524                 else if (clock_ctrl == 0)
17525                         strcat(str, "33MHz");
17526                 else if (clock_ctrl == 2)
17527                         strcat(str, "50MHz");
17528                 else if (clock_ctrl == 4)
17529                         strcat(str, "66MHz");
17530                 else if (clock_ctrl == 6)
17531                         strcat(str, "100MHz");
17532         } else {
17533                 strcpy(str, "PCI:");
17534                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17535                         strcat(str, "66MHz");
17536                 else
17537                         strcat(str, "33MHz");
17538         }
17539         if (tg3_flag(tp, PCI_32BIT))
17540                 strcat(str, ":32-bit");
17541         else
17542                 strcat(str, ":64-bit");
17543         return str;
17544 }
17545
17546 static void tg3_init_coal(struct tg3 *tp)
17547 {
17548         struct ethtool_coalesce *ec = &tp->coal;
17549
17550         memset(ec, 0, sizeof(*ec));
17551         ec->cmd = ETHTOOL_GCOALESCE;
17552         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17553         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17554         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17555         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17556         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17557         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17558         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17559         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17560         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17561
17562         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17563                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17564                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17565                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17566                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17567                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17568         }
17569
17570         if (tg3_flag(tp, 5705_PLUS)) {
17571                 ec->rx_coalesce_usecs_irq = 0;
17572                 ec->tx_coalesce_usecs_irq = 0;
17573                 ec->stats_block_coalesce_usecs = 0;
17574         }
17575 }
17576
17577 static int tg3_init_one(struct pci_dev *pdev,
17578                                   const struct pci_device_id *ent)
17579 {
17580         struct net_device *dev;
17581         struct tg3 *tp;
17582         int i, err;
17583         u32 sndmbx, rcvmbx, intmbx;
17584         char str[40];
17585         u64 dma_mask, persist_dma_mask;
17586         netdev_features_t features = 0;
17587
17588         printk_once(KERN_INFO "%s\n", version);
17589
17590         err = pci_enable_device(pdev);
17591         if (err) {
17592                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17593                 return err;
17594         }
17595
17596         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17597         if (err) {
17598                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17599                 goto err_out_disable_pdev;
17600         }
17601
17602         pci_set_master(pdev);
17603
17604         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17605         if (!dev) {
17606                 err = -ENOMEM;
17607                 goto err_out_free_res;
17608         }
17609
17610         SET_NETDEV_DEV(dev, &pdev->dev);
17611
17612         tp = netdev_priv(dev);
17613         tp->pdev = pdev;
17614         tp->dev = dev;
17615         tp->rx_mode = TG3_DEF_RX_MODE;
17616         tp->tx_mode = TG3_DEF_TX_MODE;
17617         tp->irq_sync = 1;
17618         tp->pcierr_recovery = false;
17619
17620         if (tg3_debug > 0)
17621                 tp->msg_enable = tg3_debug;
17622         else
17623                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17624
17625         if (pdev_is_ssb_gige_core(pdev)) {
17626                 tg3_flag_set(tp, IS_SSB_CORE);
17627                 if (ssb_gige_must_flush_posted_writes(pdev))
17628                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17629                 if (ssb_gige_one_dma_at_once(pdev))
17630                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17631                 if (ssb_gige_have_roboswitch(pdev)) {
17632                         tg3_flag_set(tp, USE_PHYLIB);
17633                         tg3_flag_set(tp, ROBOSWITCH);
17634                 }
17635                 if (ssb_gige_is_rgmii(pdev))
17636                         tg3_flag_set(tp, RGMII_MODE);
17637         }
17638
17639         /* The word/byte swap controls here affect register access byte
17640          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17641          * setting below.
17642          */
17643         tp->misc_host_ctrl =
17644                 MISC_HOST_CTRL_MASK_PCI_INT |
17645                 MISC_HOST_CTRL_WORD_SWAP |
17646                 MISC_HOST_CTRL_INDIR_ACCESS |
17647                 MISC_HOST_CTRL_PCISTATE_RW;
17648
17649         /* The NONFRM (non-frame) byte/word swap controls take effect
17650          * on descriptor entries, i.e. anything which isn't packet data.
17651          *
17652          * The StrongARM chips on the board (one for tx, one for rx)
17653          * are running in big-endian mode.
17654          */
17655         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17656                         GRC_MODE_WSWAP_NONFRM_DATA);
17657 #ifdef __BIG_ENDIAN
17658         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17659 #endif
17660         spin_lock_init(&tp->lock);
17661         spin_lock_init(&tp->indirect_lock);
17662         INIT_WORK(&tp->reset_task, tg3_reset_task);
17663
17664         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17665         if (!tp->regs) {
17666                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17667                 err = -ENOMEM;
17668                 goto err_out_free_dev;
17669         }
17670
17671         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17672             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17673             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17674             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17675             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17676             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17677             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17678             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17679             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17680             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17681             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17682             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17683             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17684             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17685             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17686                 tg3_flag_set(tp, ENABLE_APE);
17687                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17688                 if (!tp->aperegs) {
17689                         dev_err(&pdev->dev,
17690                                 "Cannot map APE registers, aborting\n");
17691                         err = -ENOMEM;
17692                         goto err_out_iounmap;
17693                 }
17694         }
17695
17696         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17697         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17698
17699         dev->ethtool_ops = &tg3_ethtool_ops;
17700         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17701         dev->netdev_ops = &tg3_netdev_ops;
17702         dev->irq = pdev->irq;
17703
17704         err = tg3_get_invariants(tp, ent);
17705         if (err) {
17706                 dev_err(&pdev->dev,
17707                         "Problem fetching invariants of chip, aborting\n");
17708                 goto err_out_apeunmap;
17709         }
17710
17711         /* The EPB bridge inside 5714, 5715, and 5780 and any
17712          * device behind the EPB cannot support DMA addresses wider than 40 bits.
17713          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17714          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17715          * do DMA address check in tg3_start_xmit().
17716          */
17717         if (tg3_flag(tp, IS_5788))
17718                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17719         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17720                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17721 #ifdef CONFIG_HIGHMEM
17722                 dma_mask = DMA_BIT_MASK(64);
17723 #endif
17724         } else
17725                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17726
17727         /* Configure DMA attributes. */
17728         if (dma_mask > DMA_BIT_MASK(32)) {
17729                 err = pci_set_dma_mask(pdev, dma_mask);
17730                 if (!err) {
17731                         features |= NETIF_F_HIGHDMA;
17732                         err = pci_set_consistent_dma_mask(pdev,
17733                                                           persist_dma_mask);
17734                         if (err < 0) {
17735                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17736                                         "DMA for consistent allocations\n");
17737                                 goto err_out_apeunmap;
17738                         }
17739                 }
17740         }
17741         if (err || dma_mask == DMA_BIT_MASK(32)) {
17742                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17743                 if (err) {
17744                         dev_err(&pdev->dev,
17745                                 "No usable DMA configuration, aborting\n");
17746                         goto err_out_apeunmap;
17747                 }
17748         }
17749
17750         tg3_init_bufmgr_config(tp);
17751
17752         /* 5700 B0 chips do not support checksumming correctly due
17753          * to hardware bugs.
17754          */
17755         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17756                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17757
17758                 if (tg3_flag(tp, 5755_PLUS))
17759                         features |= NETIF_F_IPV6_CSUM;
17760         }
17761
17762         /* TSO is on by default on chips that support hardware TSO.
17763          * Firmware TSO on older chips gives lower performance, so it
17764          * is off by default, but can be enabled using ethtool.
17765          */
17766         if ((tg3_flag(tp, HW_TSO_1) ||
17767              tg3_flag(tp, HW_TSO_2) ||
17768              tg3_flag(tp, HW_TSO_3)) &&
17769             (features & NETIF_F_IP_CSUM))
17770                 features |= NETIF_F_TSO;
17771         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17772                 if (features & NETIF_F_IPV6_CSUM)
17773                         features |= NETIF_F_TSO6;
17774                 if (tg3_flag(tp, HW_TSO_3) ||
17775                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17776                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17777                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17778                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17779                     tg3_asic_rev(tp) == ASIC_REV_57780)
17780                         features |= NETIF_F_TSO_ECN;
17781         }
17782
17783         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17784                          NETIF_F_HW_VLAN_CTAG_RX;
17785         dev->vlan_features |= features;
17786
17787         /*
17788          * Add loopback capability only for a subset of devices that support
17789          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17790          * loopback for the remaining devices.
17791          */
17792         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17793             !tg3_flag(tp, CPMU_PRESENT))
17794                 /* Add the loopback capability */
17795                 features |= NETIF_F_LOOPBACK;
17796
17797         dev->hw_features |= features;
17798         dev->priv_flags |= IFF_UNICAST_FLT;
17799
17800         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17801         dev->min_mtu = TG3_MIN_MTU;
17802         dev->max_mtu = TG3_MAX_MTU(tp);
17803
17804         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17805             !tg3_flag(tp, TSO_CAPABLE) &&
17806             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17807                 tg3_flag_set(tp, MAX_RXPEND_64);
17808                 tp->rx_pending = 63;
17809         }
17810
17811         err = tg3_get_device_address(tp);
17812         if (err) {
17813                 dev_err(&pdev->dev,
17814                         "Could not obtain valid ethernet address, aborting\n");
17815                 goto err_out_apeunmap;
17816         }
17817
17818         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17819         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17820         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17821         for (i = 0; i < tp->irq_max; i++) {
17822                 struct tg3_napi *tnapi = &tp->napi[i];
17823
17824                 tnapi->tp = tp;
17825                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17826
17827                 tnapi->int_mbox = intmbx;
17828                 if (i <= 4)
17829                         intmbx += 0x8;
17830                 else
17831                         intmbx += 0x4;
17832
17833                 tnapi->consmbox = rcvmbx;
17834                 tnapi->prodmbox = sndmbx;
17835
17836                 if (i)
17837                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17838                 else
17839                         tnapi->coal_now = HOSTCC_MODE_NOW;
17840
17841                 if (!tg3_flag(tp, SUPPORT_MSIX))
17842                         break;
17843
17844                 /*
17845                  * If we support MSIX, we'll be using RSS.  If we're using
17846                  * RSS, the first vector only handles link interrupts and the
17847                  * remaining vectors handle rx and tx interrupts.  Reuse the
17848                  * mailbox values for the next iteration.  The values we set
17849                  * up above are still useful for single-vector mode.
17850                  */
17851                 if (!i)
17852                         continue;
17853
17854                 rcvmbx += 0x8;
17855
17856                 if (sndmbx & 0x4)
17857                         sndmbx -= 0x4;
17858                 else
17859                         sndmbx += 0xc;
17860         }
17861
17862         /*
17863          * Reset the chip in case a UNDI or EFI driver did not shut down
17864          * DMA; otherwise the DMA self test will enable WDMAC and we'll
17865          * see (spurious) pending DMA on the PCI bus at that point.
17866          */
17867         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17868             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17869                 tg3_full_lock(tp, 0);
17870                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17871                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17872                 tg3_full_unlock(tp);
17873         }
17874
17875         err = tg3_test_dma(tp);
17876         if (err) {
17877                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17878                 goto err_out_apeunmap;
17879         }
17880
17881         tg3_init_coal(tp);
17882
17883         pci_set_drvdata(pdev, dev);
17884
17885         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17886             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17887             tg3_asic_rev(tp) == ASIC_REV_5762)
17888                 tg3_flag_set(tp, PTP_CAPABLE);
17889
17890         tg3_timer_init(tp);
17891
17892         tg3_carrier_off(tp);
17893
17894         err = register_netdev(dev);
17895         if (err) {
17896                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17897                 goto err_out_apeunmap;
17898         }
17899
17900         if (tg3_flag(tp, PTP_CAPABLE)) {
17901                 tg3_ptp_init(tp);
17902                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17903                                                    &tp->pdev->dev);
                if (IS_ERR(tp->ptp_clock))
                        tp->ptp_clock = NULL;
        }

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tg3_chip_rev_id(tp),
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d], EEE[%d])\n",
                            tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
                            (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (dev->features & NETIF_F_RXCSUM) != 0,
                    tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    tg3_flag(tp, ENABLE_ASF) != 0,
                    tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

        pci_save_state(pdev);

        return 0;

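/*
 * Error unwind: the labels below run top to bottom, releasing resources
 * in the reverse order of their acquisition above.
 */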
err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        if (pci_is_enabled(pdev))
                pci_disable_device(pdev);
        return err;
}

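/*
 * Device teardown: release what tg3_init_one() acquired, largely in
 * reverse order (PTP, firmware, pending reset work, phylib/MDIO, the
 * netdev registration, MMIO mappings, and the PCI resources).
 */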
static void tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                tg3_ptp_fini(tp);

                release_firmware(tp->fw);

                tg3_reset_task_cancel(tp);

                if (tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }

                unregister_netdev(dev);
                if (tp->aperegs) {
                        iounmap(tp->aperegs);
                        tp->aperegs = NULL;
                }
                if (tp->regs) {
                        iounmap(tp->regs);
                        tp->regs = NULL;
                }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
        }
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        tg3_reset_task_cancel(tp);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

        err = tg3_power_down_prepare(tp);
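        /*
         * If power-down preparation failed, restart the hardware so the
         * interface is left usable instead of half suspended; the
         * original error is still returned to the PM core.
         */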
        if (err) {
                int err2;

                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, true);
                if (err2)
                        goto out;

                tg3_timer_start(tp);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                if (!err2)
                        tg3_phy_start(tp);
        }

unlock:
        rtnl_unlock();
        return err;
}

static int tg3_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

        tg3_flag_set(tp, INIT_COMPLETE);
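        /* Skip the PHY reset when the link was kept up across power-down */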
        err = tg3_restart_hw(tp,
                             !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
        if (err)
                goto out;

        tg3_timer_start(tp);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

unlock:
        rtnl_unlock();
        return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

static void tg3_shutdown(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);

        rtnl_lock();
        netif_device_detach(dev);

        if (netif_running(dev))
                dev_close(dev);

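        /*
         * Power the NIC fully down only when the system is really
         * powering off; on a reboot the device is left accessible.
         */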
        if (system_state == SYSTEM_POWER_OFF)
                tg3_power_down(tp);

        rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        rtnl_lock();

        /* The netdev may not be registered or running yet */
        if (!netdev || !netif_running(netdev))
                goto done;

        /*
         * Mark recovery in progress only for a frozen channel; a
         * permanent failure is not recovered from and is torn down in
         * the "done" path below instead.
         */
        if (state == pci_channel_io_frozen)
                tp->pcierr_recovery = true;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        /* Make sure the reset task is not left running concurrently */
        tg3_reset_task_cancel(tp);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

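        /*
         * A permanent failure gets the device closed right here; a
         * recoverable one only has PCI access disabled until slot_reset
         * brings the device back.
         */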
done:
        if (state == pci_channel_io_perm_failure) {
                if (netdev) {
                        tg3_napi_enable(tp);
                        dev_close(netdev);
                }
                err = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_disable_device(pdev);
        }

        rtnl_unlock();

        return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

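        /*
         * Restore the config space saved at probe time, then save it
         * again right away: pci_restore_state() marks the saved state
         * as consumed, and a later recovery needs a valid copy too.
         */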
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netdev || !netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
                tg3_napi_enable(tp);
                dev_close(netdev);
        }
        rtnl_unlock();

        return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netdev || !netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, true);
        if (err) {
                tg3_full_unlock(tp);
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tg3_timer_start(tp);

        tg3_netif_start(tp);

        tg3_full_unlock(tp);

        tg3_phy_start(tp);

done:
        tp->pcierr_recovery = false;
        rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = tg3_remove_one,
        .err_handler    = &tg3_err_handler,
        .driver.pm      = &tg3_pm_ops,
        .shutdown       = tg3_shutdown,
};

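/*
 * module_pci_driver() expands to the module_init()/module_exit()
 * boilerplate that registers and unregisters tg3_driver.
 */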
module_pci_driver(tg3_driver);