/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2014 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
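
/* Example (from usage later in this file): feature flags are tested and
 * updated only through these wrappers, e.g.
 *
 *	if (tg3_flag(tp, JUMBO_CAPABLE))
 *		...;
 *	tg3_flag_set(tp, TAGGED_STATUS);
 *	tg3_flag_clear(tp, TAGGED_STATUS);
 *
 * The TG3_FLAG_##flag token pasting maps each name onto the TG3_FLAGS
 * enum from tg3.h, so a misspelled flag fails to compile.
 */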

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 11, 2014"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
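
/* Because TG3_TX_RING_SIZE is a power of two (512), NEXT_TX() is exactly
 * the '& (foo - 1)' rewrite described above: NEXT_TX(511) == 0, so the
 * producer index wraps without a hardware modulo instruction.
 */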

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
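
/* Net effect: when unaligned accesses are cheap (or NET_IP_ALIGN is zero),
 * the copy threshold collapses to the 256-byte constant above; otherwise
 * it remains the per-device rx_copy_thresh field so the 5701 workaround
 * can be applied per device.
 */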

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
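/* With the default TG3_DEF_TX_RING_PENDING of 511, this wakes the queue
 * once roughly 127 descriptors are free again.
 */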
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
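
/* Example (hypothetical): loading with "modprobe tg3 tg3_debug=0x7" would
 * enable only the NETIF_MSG_DRV, NETIF_MSG_PROBE and NETIF_MSG_LINK classes;
 * the default of -1 selects the full TG3_DEF_MSG_ENABLE mask defined above.
 */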

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

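/* Read back immediately after the write so the posted PCI write is flushed
 * to the device before the caller proceeds.
 */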
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
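
/* Example (from tg3_switch_clocks() below): tw32_wait_f(TG3PCI_CLOCK_CTRL,
 * clock_ctrl, 40) writes the register and guarantees at least a 40 usec
 * delay whether the posted or non-posted path is taken.
 */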

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
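
/* Typical pairing (see tg3_ape_event_lock() below):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */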

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

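/* Poll until the APE has serviced the previous event.  On success (return
 * 0) the TG3_APE_LOCK_MEM lock is held and the caller is responsible for
 * releasing it.
 */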
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

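/* Returns zero once the APE has consumed the event, nonzero if it is still
 * pending after timeout_us microseconds.
 */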
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
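
/* 5000 polls at 10 usec each bounds an MDIO transaction below to roughly
 * 50 msec before __tg3_readphy()/__tg3_writephy() give up with -EBUSY.
 */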

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

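/* Clause-45 MMD registers are reached indirectly through the clause-22
 * MMD access control/address register pair: select the devad, latch the
 * register address, then switch the control register to no-increment
 * data mode before reading or writing the value.
 */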
1228 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1229 {
1230         int err;
1231
1232         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1233         if (err)
1234                 goto done;
1235
1236         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1237         if (err)
1238                 goto done;
1239
1240         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1241                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1242         if (err)
1243                 goto done;
1244
1245         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1246
1247 done:
1248         return err;
1249 }
1250
1251 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1252 {
1253         int err;
1254
1255         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1256         if (err)
1257                 goto done;
1258
1259         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1260         if (err)
1261                 goto done;
1262
1263         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1264                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1265         if (err)
1266                 goto done;
1267
1268         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1269
1270 done:
1271         return err;
1272 }
1273
1274 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1275 {
1276         int err;
1277
1278         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1279         if (!err)
1280                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1281
1282         return err;
1283 }
1284
1285 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1286 {
1287         int err;
1288
1289         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1290         if (!err)
1291                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1292
1293         return err;
1294 }
1295
1296 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1297 {
1298         int err;
1299
1300         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1301                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1302                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1303         if (!err)
1304                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1305
1306         return err;
1307 }
1308
1309 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1310 {
1311         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1312                 set |= MII_TG3_AUXCTL_MISC_WREN;
1313
1314         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1315 }
1316
1317 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1318 {
1319         u32 val;
1320         int err;
1321
1322         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1323
1324         if (err)
1325                 return err;
1326
1327         if (enable)
1328                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1329         else
1330                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1331
1332         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1333                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1334
1335         return err;
1336 }
1337
1338 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1339 {
1340         return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1341                             reg | val | MII_TG3_MISC_SHDW_WREN);
1342 }
1343
1344 static int tg3_bmcr_reset(struct tg3 *tp)
1345 {
1346         u32 phy_control;
1347         int limit, err;
1348
1349         /* OK, reset it, and poll the BMCR_RESET bit until it
1350          * clears or we time out.
1351          */
1352         phy_control = BMCR_RESET;
1353         err = tg3_writephy(tp, MII_BMCR, phy_control);
1354         if (err != 0)
1355                 return -EBUSY;
1356
1357         limit = 5000;
1358         while (limit--) {
1359                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1360                 if (err != 0)
1361                         return -EBUSY;
1362
1363                 if ((phy_control & BMCR_RESET) == 0) {
1364                         udelay(40);
1365                         break;
1366                 }
1367                 udelay(10);
1368         }
1369         if (limit < 0)
1370                 return -EBUSY;
1371
1372         return 0;
1373 }
1374
1375 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1376 {
1377         struct tg3 *tp = bp->priv;
1378         u32 val;
1379
1380         spin_lock_bh(&tp->lock);
1381
1382         if (__tg3_readphy(tp, mii_id, reg, &val))
1383                 val = -EIO;
1384
1385         spin_unlock_bh(&tp->lock);
1386
1387         return val;
1388 }
1389
1390 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1391 {
1392         struct tg3 *tp = bp->priv;
1393         u32 ret = 0;
1394
1395         spin_lock_bh(&tp->lock);
1396
1397         if (__tg3_writephy(tp, mii_id, reg, val))
1398                 ret = -EIO;
1399
1400         spin_unlock_bh(&tp->lock);
1401
1402         return ret;
1403 }
1404
1405 static void tg3_mdio_config_5785(struct tg3 *tp)
1406 {
1407         u32 val;
1408         struct phy_device *phydev;
1409
1410         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1411         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1412         case PHY_ID_BCM50610:
1413         case PHY_ID_BCM50610M:
1414                 val = MAC_PHYCFG2_50610_LED_MODES;
1415                 break;
1416         case PHY_ID_BCMAC131:
1417                 val = MAC_PHYCFG2_AC131_LED_MODES;
1418                 break;
1419         case PHY_ID_RTL8211C:
1420                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1421                 break;
1422         case PHY_ID_RTL8201E:
1423                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1424                 break;
1425         default:
1426                 return;
1427         }
1428
1429         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1430                 tw32(MAC_PHYCFG2, val);
1431
1432                 val = tr32(MAC_PHYCFG1);
1433                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1434                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1435                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1436                 tw32(MAC_PHYCFG1, val);
1437
1438                 return;
1439         }
1440
1441         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1442                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1443                        MAC_PHYCFG2_FMODE_MASK_MASK |
1444                        MAC_PHYCFG2_GMODE_MASK_MASK |
1445                        MAC_PHYCFG2_ACT_MASK_MASK   |
1446                        MAC_PHYCFG2_QUAL_MASK_MASK |
1447                        MAC_PHYCFG2_INBAND_ENABLE;
1448
1449         tw32(MAC_PHYCFG2, val);
1450
1451         val = tr32(MAC_PHYCFG1);
1452         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1453                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1454         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1455                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1456                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1457                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1458                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1459         }
1460         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1461                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1462         tw32(MAC_PHYCFG1, val);
1463
1464         val = tr32(MAC_EXT_RGMII_MODE);
1465         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1466                  MAC_RGMII_MODE_RX_QUALITY |
1467                  MAC_RGMII_MODE_RX_ACTIVITY |
1468                  MAC_RGMII_MODE_RX_ENG_DET |
1469                  MAC_RGMII_MODE_TX_ENABLE |
1470                  MAC_RGMII_MODE_TX_LOWPWR |
1471                  MAC_RGMII_MODE_TX_RESET);
1472         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1473                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1474                         val |= MAC_RGMII_MODE_RX_INT_B |
1475                                MAC_RGMII_MODE_RX_QUALITY |
1476                                MAC_RGMII_MODE_RX_ACTIVITY |
1477                                MAC_RGMII_MODE_RX_ENG_DET;
1478                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1479                         val |= MAC_RGMII_MODE_TX_ENABLE |
1480                                MAC_RGMII_MODE_TX_LOWPWR |
1481                                MAC_RGMII_MODE_TX_RESET;
1482         }
1483         tw32(MAC_EXT_RGMII_MODE, val);
1484 }
1485
1486 static void tg3_mdio_start(struct tg3 *tp)
1487 {
1488         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1489         tw32_f(MAC_MI_MODE, tp->mi_mode);
1490         udelay(80);
1491
1492         if (tg3_flag(tp, MDIOBUS_INITED) &&
1493             tg3_asic_rev(tp) == ASIC_REV_5785)
1494                 tg3_mdio_config_5785(tp);
1495 }
1496
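/* Discover the PHY address and, when phylib is in use, allocate and
 * register the MDIO bus.  On 5717+ parts the PHY address is derived
 * from the PCI function number (+7 for SerDes), and on SSB cores with
 * a roboswitch it is taken from the switch driver.
 */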
1497 static int tg3_mdio_init(struct tg3 *tp)
1498 {
1499         int i;
1500         u32 reg;
1501         struct phy_device *phydev;
1502
1503         if (tg3_flag(tp, 5717_PLUS)) {
1504                 u32 is_serdes;
1505
1506                 tp->phy_addr = tp->pci_fn + 1;
1507
1508                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1509                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1510                 else
1511                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1512                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1513                 if (is_serdes)
1514                         tp->phy_addr += 7;
1515         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1516                 int addr;
1517
1518                 addr = ssb_gige_get_phyaddr(tp->pdev);
1519                 if (addr < 0)
1520                         return addr;
1521                 tp->phy_addr = addr;
1522         } else
1523                 tp->phy_addr = TG3_PHY_MII_ADDR;
1524
1525         tg3_mdio_start(tp);
1526
1527         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1528                 return 0;
1529
1530         tp->mdio_bus = mdiobus_alloc();
1531         if (tp->mdio_bus == NULL)
1532                 return -ENOMEM;
1533
1534         tp->mdio_bus->name     = "tg3 mdio bus";
1535         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1536                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1537         tp->mdio_bus->priv     = tp;
1538         tp->mdio_bus->parent   = &tp->pdev->dev;
1539         tp->mdio_bus->read     = &tg3_mdio_read;
1540         tp->mdio_bus->write    = &tg3_mdio_write;
1541         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1542
1543         /* The bus registration will look for all the PHYs on the mdio bus.
1544          * Unfortunately, it does not ensure the PHY is powered up before
1545          * accessing the PHY ID registers.  A chip reset is the
1546          * quickest way to bring the device back to an operational state..
1547          * quickest way to bring the device back to an operational state.
1548         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1549                 tg3_bmcr_reset(tp);
1550
1551         i = mdiobus_register(tp->mdio_bus);
1552         if (i) {
1553                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1554                 mdiobus_free(tp->mdio_bus);
1555                 return i;
1556         }
1557
1558         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1559
1560         if (!phydev || !phydev->drv) {
1561                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1562                 mdiobus_unregister(tp->mdio_bus);
1563                 mdiobus_free(tp->mdio_bus);
1564                 return -ENODEV;
1565         }
1566
1567         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1568         case PHY_ID_BCM57780:
1569                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1570                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1571                 break;
1572         case PHY_ID_BCM50610:
1573         case PHY_ID_BCM50610M:
1574                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1575                                      PHY_BRCM_RX_REFCLK_UNUSED |
1576                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1577                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1578                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1579                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1580                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1581                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1582                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1583                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1584                 /* fallthru */
1585         case PHY_ID_RTL8211C:
1586                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1587                 break;
1588         case PHY_ID_RTL8201E:
1589         case PHY_ID_BCMAC131:
1590                 phydev->interface = PHY_INTERFACE_MODE_MII;
1591                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1592                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1593                 break;
1594         }
1595
1596         tg3_flag_set(tp, MDIOBUS_INITED);
1597
1598         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1599                 tg3_mdio_config_5785(tp);
1600
1601         return 0;
1602 }
1603
1604 static void tg3_mdio_fini(struct tg3 *tp)
1605 {
1606         if (tg3_flag(tp, MDIOBUS_INITED)) {
1607                 tg3_flag_clear(tp, MDIOBUS_INITED);
1608                 mdiobus_unregister(tp->mdio_bus);
1609                 mdiobus_free(tp->mdio_bus);
1610         }
1611 }
1612
1613 /* tp->lock is held. */
1614 static inline void tg3_generate_fw_event(struct tg3 *tp)
1615 {
1616         u32 val;
1617
1618         val = tr32(GRC_RX_CPU_EVENT);
1619         val |= GRC_RX_CPU_DRIVER_EVENT;
1620         tw32_f(GRC_RX_CPU_EVENT, val);
1621
1622         tp->last_event_jiffies = jiffies;
1623 }
1624
1625 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1626
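/* Poll for the firmware's ACK of the previous driver event.  The full
 * budget is TG3_FW_EVENT_TIMEOUT_USEC, shortened by however much of it
 * has already elapsed since last_event_jiffies, then consumed in 8 usec
 * steps: e.g. with 1000 usec left, (1000 >> 3) + 1 = 126 polls.
 */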
1627 /* tp->lock is held. */
1628 static void tg3_wait_for_event_ack(struct tg3 *tp)
1629 {
1630         int i;
1631         unsigned int delay_cnt;
1632         long time_remain;
1633
1634         /* If enough time has passed, no wait is necessary. */
1635         time_remain = (long)(tp->last_event_jiffies + 1 +
1636                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1637                       (long)jiffies;
1638         if (time_remain < 0)
1639                 return;
1640
1641         /* Check if we can shorten the wait time. */
1642         delay_cnt = jiffies_to_usecs(time_remain);
1643         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1644                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1645         delay_cnt = (delay_cnt >> 3) + 1;
1646
1647         for (i = 0; i < delay_cnt; i++) {
1648                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1649                         break;
1650                 if (pci_channel_offline(tp->pdev))
1651                         break;
1652
1653                 udelay(8);
1654         }
1655 }
1656
1657 /* tp->lock is held. */
1658 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1659 {
1660         u32 reg, val;
1661
1662         val = 0;
1663         if (!tg3_readphy(tp, MII_BMCR, &reg))
1664                 val = reg << 16;
1665         if (!tg3_readphy(tp, MII_BMSR, &reg))
1666                 val |= (reg & 0xffff);
1667         *data++ = val;
1668
1669         val = 0;
1670         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1671                 val = reg << 16;
1672         if (!tg3_readphy(tp, MII_LPA, &reg))
1673                 val |= (reg & 0xffff);
1674         *data++ = val;
1675
1676         val = 0;
1677         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1678                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1679                         val = reg << 16;
1680                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1681                         val |= (reg & 0xffff);
1682         }
1683         *data++ = val;
1684
1685         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1686                 val = reg << 16;
1687         else
1688                 val = 0;
1689         *data++ = val;
1690 }
1691
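/* Send a link status update to the management (UMP) firmware on
 * ASF-enabled 5780-class devices: the four words gathered from the MII
 * registers are placed in the firmware command mailbox behind a
 * FWCMD_NICDRV_LINK_UPDATE header, then a driver event is raised.
 */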
1692 /* tp->lock is held. */
1693 static void tg3_ump_link_report(struct tg3 *tp)
1694 {
1695         u32 data[4];
1696
1697         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1698                 return;
1699
1700         tg3_phy_gather_ump_data(tp, data);
1701
1702         tg3_wait_for_event_ack(tp);
1703
1704         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1705         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1706         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1707         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1708         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1709         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1710
1711         tg3_generate_fw_event(tp);
1712 }
1713
1714 /* tp->lock is held. */
1715 static void tg3_stop_fw(struct tg3 *tp)
1716 {
1717         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1718                 /* Wait for RX cpu to ACK the previous event. */
1719                 tg3_wait_for_event_ack(tp);
1720
1721                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1722
1723                 tg3_generate_fw_event(tp);
1724
1725                 /* Wait for RX cpu to ACK this event. */
1726                 tg3_wait_for_event_ack(tp);
1727         }
1728 }
1729
1730 /* tp->lock is held. */
1731 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1732 {
1733         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1734                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1735
1736         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1737                 switch (kind) {
1738                 case RESET_KIND_INIT:
1739                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1740                                       DRV_STATE_START);
1741                         break;
1742
1743                 case RESET_KIND_SHUTDOWN:
1744                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745                                       DRV_STATE_UNLOAD);
1746                         break;
1747
1748                 case RESET_KIND_SUSPEND:
1749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750                                       DRV_STATE_SUSPEND);
1751                         break;
1752
1753                 default:
1754                         break;
1755                 }
1756         }
1757 }
1758
1759 /* tp->lock is held. */
1760 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1761 {
1762         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1763                 switch (kind) {
1764                 case RESET_KIND_INIT:
1765                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1766                                       DRV_STATE_START_DONE);
1767                         break;
1768
1769                 case RESET_KIND_SHUTDOWN:
1770                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1771                                       DRV_STATE_UNLOAD_DONE);
1772                         break;
1773
1774                 default:
1775                         break;
1776                 }
1777         }
1778 }
1779
1780 /* tp->lock is held. */
1781 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1782 {
1783         if (tg3_flag(tp, ENABLE_ASF)) {
1784                 switch (kind) {
1785                 case RESET_KIND_INIT:
1786                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787                                       DRV_STATE_START);
1788                         break;
1789
1790                 case RESET_KIND_SHUTDOWN:
1791                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1792                                       DRV_STATE_UNLOAD);
1793                         break;
1794
1795                 case RESET_KIND_SUSPEND:
1796                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797                                       DRV_STATE_SUSPEND);
1798                         break;
1799
1800                 default:
1801                         break;
1802                 }
1803         }
1804 }
1805
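/* Wait for the bootcode firmware to finish initializing.  The 5906
 * polls a VCPU status bit; other chips wait for the boot signature in
 * the firmware mailbox to be complemented.  A timeout is not fatal:
 * some (e.g. Sun onboard) parts legitimately run without firmware.
 */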
1806 static int tg3_poll_fw(struct tg3 *tp)
1807 {
1808         int i;
1809         u32 val;
1810
1811         if (tg3_flag(tp, NO_FWARE_REPORTED))
1812                 return 0;
1813
1814         if (tg3_flag(tp, IS_SSB_CORE)) {
1815                 /* We don't use firmware. */
1816                 return 0;
1817         }
1818
1819         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1820                 /* Wait up to 20ms for init done. */
1821                 for (i = 0; i < 200; i++) {
1822                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1823                                 return 0;
1824                         if (pci_channel_offline(tp->pdev))
1825                                 return -ENODEV;
1826
1827                         udelay(100);
1828                 }
1829                 return -ENODEV;
1830         }
1831
1832         /* Wait for firmware initialization to complete. */
1833         for (i = 0; i < 100000; i++) {
1834                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1835                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1836                         break;
1837                 if (pci_channel_offline(tp->pdev)) {
1838                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1839                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1840                                 netdev_info(tp->dev, "No firmware running\n");
1841                         }
1842
1843                         break;
1844                 }
1845
1846                 udelay(10);
1847         }
1848
1849         /* Chip might not be fitted with firmware.  Some Sun onboard
1850          * parts are configured like that.  So don't signal the timeout
1851          * of the above loop as an error, but do report the lack of
1852          * running firmware once.
1853          */
1854         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1855                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1856
1857                 netdev_info(tp->dev, "No firmware running\n");
1858         }
1859
1860         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1861                 /* The 57765 A0 needs a little more
1862                  * time to do some important work.
1863                  */
1864                 mdelay(10);
1865         }
1866
1867         return 0;
1868 }
1869
1870 static void tg3_link_report(struct tg3 *tp)
1871 {
1872         if (!netif_carrier_ok(tp->dev)) {
1873                 netif_info(tp, link, tp->dev, "Link is down\n");
1874                 tg3_ump_link_report(tp);
1875         } else if (netif_msg_link(tp)) {
1876                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1877                             (tp->link_config.active_speed == SPEED_1000 ?
1878                              1000 :
1879                              (tp->link_config.active_speed == SPEED_100 ?
1880                               100 : 10)),
1881                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1882                              "full" : "half"));
1883
1884                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1885                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1886                             "on" : "off",
1887                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1888                             "on" : "off");
1889
1890                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1891                         netdev_info(tp->dev, "EEE is %s\n",
1892                                     tp->setlpicnt ? "enabled" : "disabled");
1893
1894                 tg3_ump_link_report(tp);
1895         }
1896
1897         tp->link_up = netif_carrier_ok(tp->dev);
1898 }
1899
1900 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1901 {
1902         u32 flowctrl = 0;
1903
1904         if (adv & ADVERTISE_PAUSE_CAP) {
1905                 flowctrl |= FLOW_CTRL_RX;
1906                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1907                         flowctrl |= FLOW_CTRL_TX;
1908         } else if (adv & ADVERTISE_PAUSE_ASYM)
1909                 flowctrl |= FLOW_CTRL_TX;
1910
1911         return flowctrl;
1912 }
1913
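/* Map the FLOW_CTRL_{TX,RX} capability bits onto the 1000BASE-X pause
 * advertisement bits per IEEE 802.3 Annex 28B:
 *
 *   TX and RX -> PAUSE
 *   TX only   -> ASYM_PAUSE
 *   RX only   -> PAUSE | ASYM_PAUSE
 */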
1914 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1915 {
1916         u16 miireg;
1917
1918         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1919                 miireg = ADVERTISE_1000XPAUSE;
1920         else if (flow_ctrl & FLOW_CTRL_TX)
1921                 miireg = ADVERTISE_1000XPSE_ASYM;
1922         else if (flow_ctrl & FLOW_CTRL_RX)
1923                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1924         else
1925                 miireg = 0;
1926
1927         return miireg;
1928 }
1929
1930 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1931 {
1932         u32 flowctrl = 0;
1933
1934         if (adv & ADVERTISE_1000XPAUSE) {
1935                 flowctrl |= FLOW_CTRL_RX;
1936                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1937                         flowctrl |= FLOW_CTRL_TX;
1938         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1939                 flowctrl |= FLOW_CTRL_TX;
1940
1941         return flowctrl;
1942 }
1943
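/* Resolve the negotiated pause capability from the local and remote
 * 1000BASE-X advertisements: PAUSE on both sides yields symmetric flow
 * control; if only ASYM_PAUSE is common, the result is FLOW_CTRL_RX
 * when the local side advertised PAUSE, FLOW_CTRL_TX when the remote did.
 */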
1944 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1945 {
1946         u8 cap = 0;
1947
1948         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1949                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1950         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1951                 if (lcladv & ADVERTISE_1000XPAUSE)
1952                         cap = FLOW_CTRL_RX;
1953                 if (rmtadv & ADVERTISE_1000XPAUSE)
1954                         cap = FLOW_CTRL_TX;
1955         }
1956
1957         return cap;
1958 }
1959
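/* Apply the resolved flow control settings to the MAC.  With pause
 * autonegotiation enabled the result comes from the advertisements
 * (1000BASE-X or copper resolution as appropriate); otherwise the
 * configured flowctrl is used.  Only MAC mode registers whose value
 * actually changed are rewritten.
 */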
1960 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1961 {
1962         u8 autoneg;
1963         u8 flowctrl = 0;
1964         u32 old_rx_mode = tp->rx_mode;
1965         u32 old_tx_mode = tp->tx_mode;
1966
1967         if (tg3_flag(tp, USE_PHYLIB))
1968                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1969         else
1970                 autoneg = tp->link_config.autoneg;
1971
1972         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1973                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1974                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1975                 else
1976                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1977         } else
1978                 flowctrl = tp->link_config.flowctrl;
1979
1980         tp->link_config.active_flowctrl = flowctrl;
1981
1982         if (flowctrl & FLOW_CTRL_RX)
1983                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1984         else
1985                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1986
1987         if (old_rx_mode != tp->rx_mode)
1988                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1989
1990         if (flowctrl & FLOW_CTRL_TX)
1991                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1992         else
1993                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1994
1995         if (old_tx_mode != tp->tx_mode)
1996                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1997 }
1998
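/* phylib link-change callback.  Recomputes the MAC port mode and
 * duplex from the PHY state, re-resolves flow control, updates the MI
 * status and TX length registers for the new speed, and emits a link
 * report if anything user-visible changed.
 */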
1999 static void tg3_adjust_link(struct net_device *dev)
2000 {
2001         u8 oldflowctrl, linkmesg = 0;
2002         u32 mac_mode, lcl_adv, rmt_adv;
2003         struct tg3 *tp = netdev_priv(dev);
2004         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2005
2006         spin_lock_bh(&tp->lock);
2007
2008         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2009                                     MAC_MODE_HALF_DUPLEX);
2010
2011         oldflowctrl = tp->link_config.active_flowctrl;
2012
2013         if (phydev->link) {
2014                 lcl_adv = 0;
2015                 rmt_adv = 0;
2016
2017                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2018                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2019                 else if (phydev->speed == SPEED_1000 ||
2020                          tg3_asic_rev(tp) != ASIC_REV_5785)
2021                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2022                 else
2023                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2024
2025                 if (phydev->duplex == DUPLEX_HALF)
2026                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2027                 else {
2028                         lcl_adv = mii_advertise_flowctrl(
2029                                   tp->link_config.flowctrl);
2030
2031                         if (phydev->pause)
2032                                 rmt_adv = LPA_PAUSE_CAP;
2033                         if (phydev->asym_pause)
2034                                 rmt_adv |= LPA_PAUSE_ASYM;
2035                 }
2036
2037                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2038         } else
2039                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2040
2041         if (mac_mode != tp->mac_mode) {
2042                 tp->mac_mode = mac_mode;
2043                 tw32_f(MAC_MODE, tp->mac_mode);
2044                 udelay(40);
2045         }
2046
2047         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2048                 if (phydev->speed == SPEED_10)
2049                         tw32(MAC_MI_STAT,
2050                              MAC_MI_STAT_10MBPS_MODE |
2051                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2052                 else
2053                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2054         }
2055
2056         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2057                 tw32(MAC_TX_LENGTHS,
2058                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2059                       (6 << TX_LENGTHS_IPG_SHIFT) |
2060                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2061         else
2062                 tw32(MAC_TX_LENGTHS,
2063                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2064                       (6 << TX_LENGTHS_IPG_SHIFT) |
2065                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2066
2067         if (phydev->link != tp->old_link ||
2068             phydev->speed != tp->link_config.active_speed ||
2069             phydev->duplex != tp->link_config.active_duplex ||
2070             oldflowctrl != tp->link_config.active_flowctrl)
2071                 linkmesg = 1;
2072
2073         tp->old_link = phydev->link;
2074         tp->link_config.active_speed = phydev->speed;
2075         tp->link_config.active_duplex = phydev->duplex;
2076
2077         spin_unlock_bh(&tp->lock);
2078
2079         if (linkmesg)
2080                 tg3_link_report(tp);
2081 }
2082
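/* Connect the MAC to its PHY through phylib.  The PHY is first brought
 * to a known state with a BMCR reset, then the supported feature mask
 * is trimmed to what the MAC can do on this interface (10/100-only
 * parts lose the gigabit features).
 */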
2083 static int tg3_phy_init(struct tg3 *tp)
2084 {
2085         struct phy_device *phydev;
2086
2087         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2088                 return 0;
2089
2090         /* Bring the PHY back to a known state. */
2091         tg3_bmcr_reset(tp);
2092
2093         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2094
2095         /* Attach the MAC to the PHY. */
2096         phydev = phy_connect(tp->dev, phydev_name(phydev),
2097                              tg3_adjust_link, phydev->interface);
2098         if (IS_ERR(phydev)) {
2099                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2100                 return PTR_ERR(phydev);
2101         }
2102
2103         /* Mask with MAC supported features. */
2104         switch (phydev->interface) {
2105         case PHY_INTERFACE_MODE_GMII:
2106         case PHY_INTERFACE_MODE_RGMII:
2107                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2108                         phydev->supported &= (PHY_GBIT_FEATURES |
2109                                               SUPPORTED_Pause |
2110                                               SUPPORTED_Asym_Pause);
2111                         break;
2112                 }
2113                 /* fallthru */
2114         case PHY_INTERFACE_MODE_MII:
2115                 phydev->supported &= (PHY_BASIC_FEATURES |
2116                                       SUPPORTED_Pause |
2117                                       SUPPORTED_Asym_Pause);
2118                 break;
2119         default:
2120                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2121                 return -EINVAL;
2122         }
2123
2124         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2125
2126         phydev->advertising = phydev->supported;
2127
2128         phy_attached_info(phydev);
2129
2130         return 0;
2131 }
2132
2133 static void tg3_phy_start(struct tg3 *tp)
2134 {
2135         struct phy_device *phydev;
2136
2137         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2138                 return;
2139
2140         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2141
2142         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2143                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2144                 phydev->speed = tp->link_config.speed;
2145                 phydev->duplex = tp->link_config.duplex;
2146                 phydev->autoneg = tp->link_config.autoneg;
2147                 phydev->advertising = tp->link_config.advertising;
2148         }
2149
2150         phy_start(phydev);
2151
2152         phy_start_aneg(phydev);
2153 }
2154
2155 static void tg3_phy_stop(struct tg3 *tp)
2156 {
2157         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2158                 return;
2159
2160         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2161 }
2162
2163 static void tg3_phy_fini(struct tg3 *tp)
2164 {
2165         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2166                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2167                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2168         }
2169 }
2170
2171 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2172 {
2173         int err;
2174         u32 val;
2175
2176         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2177                 return 0;
2178
2179         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2180                 /* Cannot do read-modify-write on 5401 */
2181                 err = tg3_phy_auxctl_write(tp,
2182                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2183                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2184                                            0x4c20);
2185                 goto done;
2186         }
2187
2188         err = tg3_phy_auxctl_read(tp,
2189                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2190         if (err)
2191                 return err;
2192
2193         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2194         err = tg3_phy_auxctl_write(tp,
2195                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2196
2197 done:
2198         return err;
2199 }
2200
2201 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2202 {
2203         u32 phytest;
2204
2205         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2206                 u32 phy;
2207
2208                 tg3_writephy(tp, MII_TG3_FET_TEST,
2209                              phytest | MII_TG3_FET_SHADOW_EN);
2210                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2211                         if (enable)
2212                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2213                         else
2214                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2215                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2216                 }
2217                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2218         }
2219 }
2220
2221 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2222 {
2223         u32 reg;
2224
2225         if (!tg3_flag(tp, 5705_PLUS) ||
2226             (tg3_flag(tp, 5717_PLUS) &&
2227              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2228                 return;
2229
2230         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2231                 tg3_phy_fet_toggle_apd(tp, enable);
2232                 return;
2233         }
2234
2235         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2236               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2237               MII_TG3_MISC_SHDW_SCR5_SDTL |
2238               MII_TG3_MISC_SHDW_SCR5_C125OE;
2239         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2240                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2241
2242         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2243
2245         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2246         if (enable)
2247                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2248
2249         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2250 }
2251
2252 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2253 {
2254         u32 phy;
2255
2256         if (!tg3_flag(tp, 5705_PLUS) ||
2257             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2258                 return;
2259
2260         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2261                 u32 ephy;
2262
2263                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2264                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2265
2266                         tg3_writephy(tp, MII_TG3_FET_TEST,
2267                                      ephy | MII_TG3_FET_SHADOW_EN);
2268                         if (!tg3_readphy(tp, reg, &phy)) {
2269                                 if (enable)
2270                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2271                                 else
2272                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2273                                 tg3_writephy(tp, reg, phy);
2274                         }
2275                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2276                 }
2277         } else {
2278                 int ret;
2279
2280                 ret = tg3_phy_auxctl_read(tp,
2281                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2282                 if (!ret) {
2283                         if (enable)
2284                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2285                         else
2286                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2287                         tg3_phy_auxctl_write(tp,
2288                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2289                 }
2290         }
2291 }
2292
2293 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2294 {
2295         int ret;
2296         u32 val;
2297
2298         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2299                 return;
2300
2301         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2302         if (!ret)
2303                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2304                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2305 }
2306
2307 static void tg3_phy_apply_otp(struct tg3 *tp)
2308 {
2309         u32 otp, phy;
2310
2311         if (!tp->phy_otp)
2312                 return;
2313
2314         otp = tp->phy_otp;
2315
2316         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2317                 return;
2318
2319         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2320         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2321         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2322
2323         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2324               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2325         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2326
2327         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2328         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2329         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2330
2331         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2332         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2333
2334         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2335         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2336
2337         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2338               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2339         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2340
2341         tg3_phy_toggle_auxctl_smdsp(tp, false);
2342 }
2343
2344 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2345 {
2346         u32 val;
2347         struct ethtool_eee *dest = &tp->eee;
2348
2349         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2350                 return;
2351
2352         if (eee)
2353                 dest = eee;
2354
2355         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2356                 return;
2357
2358         /* Pull eee_active */
2359         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2360             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2361                 dest->eee_active = 1;
2362         } else
2363                 dest->eee_active = 0;
2364
2365         /* Pull lp advertised settings */
2366         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2367                 return;
2368         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2369
2370         /* Pull advertised and eee_enabled settings */
2371         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2372                 return;
2373         dest->eee_enabled = !!val;
2374         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2375
2376         /* Pull tx_lpi_enabled */
2377         val = tr32(TG3_CPMU_EEE_MODE);
2378         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2379
2380         /* Pull lpi timer value */
2381         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2382 }
2383
2384 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2385 {
2386         u32 val;
2387
2388         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2389                 return;
2390
2391         tp->setlpicnt = 0;
2392
2393         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2394             current_link_up &&
2395             tp->link_config.active_duplex == DUPLEX_FULL &&
2396             (tp->link_config.active_speed == SPEED_100 ||
2397              tp->link_config.active_speed == SPEED_1000)) {
2398                 u32 eeectl;
2399
2400                 if (tp->link_config.active_speed == SPEED_1000)
2401                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2402                 else
2403                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2404
2405                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2406
2407                 tg3_eee_pull_config(tp, NULL);
2408                 if (tp->eee.eee_active)
2409                         tp->setlpicnt = 2;
2410         }
2411
2412         if (!tp->setlpicnt) {
2413                 if (current_link_up &&
2414                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2415                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2416                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2417                 }
2418
2419                 val = tr32(TG3_CPMU_EEE_MODE);
2420                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2421         }
2422 }
2423
2424 static void tg3_phy_eee_enable(struct tg3 *tp)
2425 {
2426         u32 val;
2427
2428         if (tp->link_config.active_speed == SPEED_1000 &&
2429             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2430              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2431              tg3_flag(tp, 57765_CLASS)) &&
2432             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2433                 val = MII_TG3_DSP_TAP26_ALNOKO |
2434                       MII_TG3_DSP_TAP26_RMRXSTO;
2435                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2436                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2437         }
2438
2439         val = tr32(TG3_CPMU_EEE_MODE);
2440         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2441 }
2442
2443 static int tg3_wait_macro_done(struct tg3 *tp)
2444 {
2445         int limit = 100;
2446
2447         while (limit--) {
2448                 u32 tmp32;
2449
2450                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2451                         if ((tmp32 & 0x1000) == 0)
2452                                 break;
2453                 }
2454         }
2455         if (limit < 0)
2456                 return -EBUSY;
2457
2458         return 0;
2459 }
2460
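/* Write a known test pattern into each of the four DSP channels and
 * read it back.  A mismatch or a DSP macro timeout fails the health
 * check; *resetp is set when the PHY needs another reset before the
 * caller retries.
 */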
2461 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2462 {
2463         static const u32 test_pat[4][6] = {
2464         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2465         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2466         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2467         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2468         };
2469         int chan;
2470
2471         for (chan = 0; chan < 4; chan++) {
2472                 int i;
2473
2474                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2475                              (chan * 0x2000) | 0x0200);
2476                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2477
2478                 for (i = 0; i < 6; i++)
2479                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2480                                      test_pat[chan][i]);
2481
2482                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2483                 if (tg3_wait_macro_done(tp)) {
2484                         *resetp = 1;
2485                         return -EBUSY;
2486                 }
2487
2488                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2489                              (chan * 0x2000) | 0x0200);
2490                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2491                 if (tg3_wait_macro_done(tp)) {
2492                         *resetp = 1;
2493                         return -EBUSY;
2494                 }
2495
2496                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2497                 if (tg3_wait_macro_done(tp)) {
2498                         *resetp = 1;
2499                         return -EBUSY;
2500                 }
2501
2502                 for (i = 0; i < 6; i += 2) {
2503                         u32 low, high;
2504
2505                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2506                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2507                             tg3_wait_macro_done(tp)) {
2508                                 *resetp = 1;
2509                                 return -EBUSY;
2510                         }
2511                         low &= 0x7fff;
2512                         high &= 0x000f;
2513                         if (low != test_pat[chan][i] ||
2514                             high != test_pat[chan][i+1]) {
2515                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2516                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2517                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2518
2519                                 return -EBUSY;
2520                         }
2521                 }
2522         }
2523
2524         return 0;
2525 }
2526
2527 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2528 {
2529         int chan;
2530
2531         for (chan = 0; chan < 4; chan++) {
2532                 int i;
2533
2534                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2535                              (chan * 0x2000) | 0x0200);
2536                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2537                 for (i = 0; i < 6; i++)
2538                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2539                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2540                 if (tg3_wait_macro_done(tp))
2541                         return -EBUSY;
2542         }
2543
2544         return 0;
2545 }
2546
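/* Reset workaround for 5703/5704/5705 PHYs: force 1000 Mbps
 * full-duplex master mode with the transmitter disabled, verify the
 * DSP channels with test patterns (retrying the reset up to 10 times),
 * then clear the channels and restore the original register state.
 */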
2547 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2548 {
2549         u32 reg32, phy9_orig;
2550         int retries, do_phy_reset, err;
2551
2552         retries = 10;
2553         do_phy_reset = 1;
2554         do {
2555                 if (do_phy_reset) {
2556                         err = tg3_bmcr_reset(tp);
2557                         if (err)
2558                                 return err;
2559                         do_phy_reset = 0;
2560                 }
2561
2562                 /* Disable transmitter and interrupt.  */
2563                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2564                         continue;
2565
2566                 reg32 |= 0x3000;
2567                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2568
2569                 /* Set full-duplex, 1000 mbps.  */
2570                 tg3_writephy(tp, MII_BMCR,
2571                              BMCR_FULLDPLX | BMCR_SPEED1000);
2572
2573                 /* Set to master mode.  */
2574                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2575                         continue;
2576
2577                 tg3_writephy(tp, MII_CTRL1000,
2578                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2579
2580                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2581                 if (err)
2582                         return err;
2583
2584                 /* Block the PHY control access.  */
2585                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2586
2587                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2588                 if (!err)
2589                         break;
2590         } while (--retries);
2591
2592         err = tg3_phy_reset_chanpat(tp);
2593         if (err)
2594                 return err;
2595
2596         tg3_phydsp_write(tp, 0x8005, 0x0000);
2597
2598         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2599         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2600
2601         tg3_phy_toggle_auxctl_smdsp(tp, false);
2602
2603         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2604
2605         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2606         if (err)
2607                 return err;
2608
2609         reg32 &= ~0x3000;
2610         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2611
2612         return 0;
2613 }
2614
2615 static void tg3_carrier_off(struct tg3 *tp)
2616 {
2617         netif_carrier_off(tp->dev);
2618         tp->link_up = false;
2619 }
2620
2621 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2622 {
2623         if (tg3_flag(tp, ENABLE_ASF))
2624                 netdev_warn(tp->dev,
2625                             "Management side-band traffic will be interrupted during phy settings change\n");
2626 }
2627
2628 /* Reset the tigon3 PHY and reapply the post-reset fixups the chip
2629  * needs (DSP workarounds, OTP values, APD, MDI-X, wirespeed).
2630  */
2631 static int tg3_phy_reset(struct tg3 *tp)
2632 {
2633         u32 val, cpmuctrl;
2634         int err;
2635
2636         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2637                 val = tr32(GRC_MISC_CFG);
2638                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2639                 udelay(40);
2640         }
2641         err  = tg3_readphy(tp, MII_BMSR, &val);
2642         err |= tg3_readphy(tp, MII_BMSR, &val);
2643         if (err != 0)
2644                 return -EBUSY;
2645
2646         if (netif_running(tp->dev) && tp->link_up) {
2647                 netif_carrier_off(tp->dev);
2648                 tg3_link_report(tp);
2649         }
2650
2651         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2652             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2653             tg3_asic_rev(tp) == ASIC_REV_5705) {
2654                 err = tg3_phy_reset_5703_4_5(tp);
2655                 if (err)
2656                         return err;
2657                 goto out;
2658         }
2659
2660         cpmuctrl = 0;
2661         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2662             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2663                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2664                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2665                         tw32(TG3_CPMU_CTRL,
2666                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2667         }
2668
2669         err = tg3_bmcr_reset(tp);
2670         if (err)
2671                 return err;
2672
2673         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2674                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2675                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2676
2677                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2678         }
2679
2680         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2681             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2682                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2683                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2684                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2685                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2686                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2687                         udelay(40);
2688                 }
2689         }
2690
2691         if (tg3_flag(tp, 5717_PLUS) &&
2692             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2693                 return 0;
2694
2695         tg3_phy_apply_otp(tp);
2696
2697         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2698                 tg3_phy_toggle_apd(tp, true);
2699         else
2700                 tg3_phy_toggle_apd(tp, false);
2701
2702 out:
2703         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2704             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2705                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2706                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2707                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2708         }
2709
2710         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2711                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2712                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2713         }
2714
2715         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2716                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2717                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2718                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2719                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2720                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2721                 }
2722         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2723                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2724                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2725                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2726                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2727                                 tg3_writephy(tp, MII_TG3_TEST1,
2728                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2729                         } else
2730                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2731
2732                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2733                 }
2734         }
2735
2736         /* Set the Extended packet length bit (bit 14) on all chips
2737          * that support jumbo frames. */
2738         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2739                 /* Cannot do read-modify-write on 5401 */
2740                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2741         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2742                 /* Set bit 14 with read-modify-write to preserve other bits */
2743                 err = tg3_phy_auxctl_read(tp,
2744                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2745                 if (!err)
2746                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2747                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2748         }
2749
2750         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2751          * jumbo frame transmission.
2752          */
2753         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2754                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2755                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2756                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2757         }
2758
2759         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2760                 /* adjust output voltage */
2761                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2762         }
2763
2764         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2765                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2766
2767         tg3_phy_toggle_automdix(tp, true);
2768         tg3_phy_set_wirespeed(tp);
2769         return 0;
2770 }
2771
2772 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2773 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2774 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2775                                           TG3_GPIO_MSG_NEED_VAUX)
2776 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2777         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2778          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2779          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2780          (TG3_GPIO_MSG_DRVR_PRES << 12))
2781
2782 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2783         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2784          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2785          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2786          (TG3_GPIO_MSG_NEED_VAUX << 12))
2787
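/* Each PCI function owns a 4-bit slice of the shared GPIO status word
 * (driver present / needs Vaux).  On 5717/5719 the word lives in the
 * APE GPIO_MSG register, elsewhere in CPMU_DRV_STATUS.  Returns the
 * updated status for all functions so callers can pick a power source.
 */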
2788 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2789 {
2790         u32 status, shift;
2791
2792         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2793             tg3_asic_rev(tp) == ASIC_REV_5719)
2794                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2795         else
2796                 status = tr32(TG3_CPMU_DRV_STATUS);
2797
2798         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2799         status &= ~(TG3_GPIO_MSG_MASK << shift);
2800         status |= (newstat << shift);
2801
2802         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2803             tg3_asic_rev(tp) == ASIC_REV_5719)
2804                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2805         else
2806                 tw32(TG3_CPMU_DRV_STATUS, status);
2807
2808         return status >> TG3_APE_GPIO_MSG_SHIFT;
2809 }
2810
2811 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2812 {
2813         if (!tg3_flag(tp, IS_NIC))
2814                 return 0;
2815
2816         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2817             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2818             tg3_asic_rev(tp) == ASIC_REV_5720) {
2819                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2820                         return -EIO;
2821
2822                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2823
2824                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2825                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2826
2827                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2828         } else {
2829                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2830                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2831         }
2832
2833         return 0;
2834 }
2835
2836 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2837 {
2838         u32 grc_local_ctrl;
2839
2840         if (!tg3_flag(tp, IS_NIC) ||
2841             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2842             tg3_asic_rev(tp) == ASIC_REV_5701)
2843                 return;
2844
2845         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2846
2847         tw32_wait_f(GRC_LOCAL_CTRL,
2848                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2849                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2850
2851         tw32_wait_f(GRC_LOCAL_CTRL,
2852                     grc_local_ctrl,
2853                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2854
2855         tw32_wait_f(GRC_LOCAL_CTRL,
2856                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2857                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2858 }
2859
2860 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2861 {
2862         if (!tg3_flag(tp, IS_NIC))
2863                 return;
2864
2865         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2866             tg3_asic_rev(tp) == ASIC_REV_5701) {
2867                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2868                             (GRC_LCLCTRL_GPIO_OE0 |
2869                              GRC_LCLCTRL_GPIO_OE1 |
2870                              GRC_LCLCTRL_GPIO_OE2 |
2871                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2872                              GRC_LCLCTRL_GPIO_OUTPUT1),
2873                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2874         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2875                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2876                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2877                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2878                                      GRC_LCLCTRL_GPIO_OE1 |
2879                                      GRC_LCLCTRL_GPIO_OE2 |
2880                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2881                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2882                                      tp->grc_local_ctrl;
2883                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2884                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2885
2886                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2887                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2888                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2889
2890                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2891                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2892                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2893         } else {
2894                 u32 no_gpio2;
2895                 u32 grc_local_ctrl = 0;
2896
2897                 /* Workaround to prevent overdrawing Amps. */
2898                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2899                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2900                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2901                                     grc_local_ctrl,
2902                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2903                 }
2904
2905                 /* On 5753 and variants, GPIO2 cannot be used. */
2906                 no_gpio2 = tp->nic_sram_data_cfg &
2907                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2908
2909                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2910                                   GRC_LCLCTRL_GPIO_OE1 |
2911                                   GRC_LCLCTRL_GPIO_OE2 |
2912                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2913                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2914                 if (no_gpio2) {
2915                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2916                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2917                 }
2918                 tw32_wait_f(GRC_LOCAL_CTRL,
2919                             tp->grc_local_ctrl | grc_local_ctrl,
2920                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2921
2922                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2923
2924                 tw32_wait_f(GRC_LOCAL_CTRL,
2925                             tp->grc_local_ctrl | grc_local_ctrl,
2926                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2927
2928                 if (!no_gpio2) {
2929                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2930                         tw32_wait_f(GRC_LOCAL_CTRL,
2931                                     tp->grc_local_ctrl | grc_local_ctrl,
2932                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2933                 }
2934         }
2935 }
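
/* Note on the sequencing above: each tw32_wait_f() writes GRC_LOCAL_CTRL
 * and then waits TG3_GRC_LCLCTL_PWRSW_DELAY, so the power-switch GPIOs
 * are stepped one state at a time rather than slammed to their final
 * values in a single write.
 */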
2936
2937 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2938 {
2939         u32 msg = 0;
2940
2941         /* Serialize power state transitions */
2942         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2943                 return;
2944
2945         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2946                 msg = TG3_GPIO_MSG_NEED_VAUX;
2947
2948         msg = tg3_set_function_status(tp, msg);
2949
2950         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2951                 goto done;
2952
2953         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2954                 tg3_pwrsrc_switch_to_vaux(tp);
2955         else
2956                 tg3_pwrsrc_die_with_vmain(tp);
2957
2958 done:
2959         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2960 }
2961
2962 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2963 {
2964         bool need_vaux = false;
2965
2966         /* The GPIOs do something completely different on 57765. */
2967         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2968                 return;
2969
2970         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2971             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2972             tg3_asic_rev(tp) == ASIC_REV_5720) {
2973                 tg3_frob_aux_power_5717(tp, include_wol ?
2974                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2975                 return;
2976         }
2977
2978         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2979                 struct net_device *dev_peer;
2980
2981                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2982
2983                 /* remove_one() may have been run on the peer. */
2984                 if (dev_peer) {
2985                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2986
2987                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2988                                 return;
2989
2990                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2991                             tg3_flag(tp_peer, ENABLE_ASF))
2992                                 need_vaux = true;
2993                 }
2994         }
2995
2996         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2997             tg3_flag(tp, ENABLE_ASF))
2998                 need_vaux = true;
2999
3000         if (need_vaux)
3001                 tg3_pwrsrc_switch_to_vaux(tp);
3002         else
3003                 tg3_pwrsrc_die_with_vmain(tp);
3004 }
3005
3006 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3007 {
3008         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3009                 return 1;
3010         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3011                 if (speed != SPEED_10)
3012                         return 1;
3013         } else if (speed == SPEED_10)
3014                 return 1;
3015
3016         return 0;
3017 }
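
/* Summary of the logic above (illustrative, derived from the code): the
 * return value says whether MAC_MODE_LINK_POLARITY should be set.
 * LED_CTRL_MODE_PHY_2 always returns 1; a BCM5411 PHY returns 1 at any
 * speed other than 10 Mbps; every other PHY returns 1 only at 10 Mbps.
 * For example, with the default LED mode and a non-5411 PHY:
 *
 *	tg3_5700_link_polarity(tp, SPEED_10);	// 1
 *	tg3_5700_link_polarity(tp, SPEED_100);	// 0
 */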
3018
3019 static bool tg3_phy_power_bug(struct tg3 *tp)
3020 {
3021         switch (tg3_asic_rev(tp)) {
3022         case ASIC_REV_5700:
3023         case ASIC_REV_5704:
3024                 return true;
3025         case ASIC_REV_5780:
3026                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3027                         return true;
3028                 return false;
3029         case ASIC_REV_5717:
3030                 if (!tp->pci_fn)
3031                         return true;
3032                 return false;
3033         case ASIC_REV_5719:
3034         case ASIC_REV_5720:
3035                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3036                     !tp->pci_fn)
3037                         return true;
3038                 return false;
3039         }
3040
3041         return false;
3042 }
3043
3044 static bool tg3_phy_led_bug(struct tg3 *tp)
3045 {
3046         switch (tg3_asic_rev(tp)) {
3047         case ASIC_REV_5719:
3048         case ASIC_REV_5720:
3049                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3050                     !tp->pci_fn)
3051                         return true;
3052                 return false;
3053         }
3054
3055         return false;
3056 }
3057
3058 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3059 {
3060         u32 val;
3061
3062         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3063                 return;
3064
3065         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3066                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3067                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3068                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3069
3070                         sg_dig_ctrl |=
3071                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3072                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3073                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3074                 }
3075                 return;
3076         }
3077
3078         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3079                 tg3_bmcr_reset(tp);
3080                 val = tr32(GRC_MISC_CFG);
3081                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3082                 udelay(40);
3083                 return;
3084         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3085                 u32 phytest;
3086                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3087                         u32 phy;
3088
3089                         tg3_writephy(tp, MII_ADVERTISE, 0);
3090                         tg3_writephy(tp, MII_BMCR,
3091                                      BMCR_ANENABLE | BMCR_ANRESTART);
3092
3093                         tg3_writephy(tp, MII_TG3_FET_TEST,
3094                                      phytest | MII_TG3_FET_SHADOW_EN);
3095                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3096                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3097                                 tg3_writephy(tp,
3098                                              MII_TG3_FET_SHDW_AUXMODE4,
3099                                              phy);
3100                         }
3101                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3102                 }
3103                 return;
3104         } else if (do_low_power) {
3105                 if (!tg3_phy_led_bug(tp))
3106                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3107                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3108
3109                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3110                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3111                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3112                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3113         }
3114
3115         /* The PHY should not be powered down on some chips because
3116          * of bugs.
3117          */
3118         if (tg3_phy_power_bug(tp))
3119                 return;
3120
3121         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3122             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3123                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3124                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3125                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3126                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3127         }
3128
3129         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3130 }
3131
3132 /* tp->lock is held. */
3133 static int tg3_nvram_lock(struct tg3 *tp)
3134 {
3135         if (tg3_flag(tp, NVRAM)) {
3136                 int i;
3137
3138                 if (tp->nvram_lock_cnt == 0) {
3139                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3140                         for (i = 0; i < 8000; i++) {
3141                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3142                                         break;
3143                                 udelay(20);
3144                         }
3145                         if (i == 8000) {
3146                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3147                                 return -ENODEV;
3148                         }
3149                 }
3150                 tp->nvram_lock_cnt++;
3151         }
3152         return 0;
3153 }
3154
3155 /* tp->lock is held. */
3156 static void tg3_nvram_unlock(struct tg3 *tp)
3157 {
3158         if (tg3_flag(tp, NVRAM)) {
3159                 if (tp->nvram_lock_cnt > 0)
3160                         tp->nvram_lock_cnt--;
3161                 if (tp->nvram_lock_cnt == 0)
3162                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3163         }
3164 }
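
/* Typical arbitration pattern (a sketch mirroring tg3_nvram_read() below).
 * The lock nests via nvram_lock_cnt, so every successful lock must be
 * paired with exactly one unlock:
 *
 *	ret = tg3_nvram_lock(tp);
 *	if (ret)
 *		return ret;
 *	tg3_enable_nvram_access(tp);
 *	... touch NVRAM registers ...
 *	tg3_disable_nvram_access(tp);
 *	tg3_nvram_unlock(tp);
 */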
3165
3166 /* tp->lock is held. */
3167 static void tg3_enable_nvram_access(struct tg3 *tp)
3168 {
3169         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3170                 u32 nvaccess = tr32(NVRAM_ACCESS);
3171
3172                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3173         }
3174 }
3175
3176 /* tp->lock is held. */
3177 static void tg3_disable_nvram_access(struct tg3 *tp)
3178 {
3179         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3180                 u32 nvaccess = tr32(NVRAM_ACCESS);
3181
3182                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3183         }
3184 }
3185
3186 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3187                                         u32 offset, u32 *val)
3188 {
3189         u32 tmp;
3190         int i;
3191
3192         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3193                 return -EINVAL;
3194
3195         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3196                                         EEPROM_ADDR_DEVID_MASK |
3197                                         EEPROM_ADDR_READ);
3198         tw32(GRC_EEPROM_ADDR,
3199              tmp |
3200              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3201              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3202               EEPROM_ADDR_ADDR_MASK) |
3203              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3204
3205         for (i = 0; i < 1000; i++) {
3206                 tmp = tr32(GRC_EEPROM_ADDR);
3207
3208                 if (tmp & EEPROM_ADDR_COMPLETE)
3209                         break;
3210                 msleep(1);
3211         }
3212         if (!(tmp & EEPROM_ADDR_COMPLETE))
3213                 return -EBUSY;
3214
3215         tmp = tr32(GRC_EEPROM_DATA);
3216
3217         /*
3218          * The data will always be opposite the native endian
3219          * format.  Perform a blind byteswap to compensate.
3220          */
3221         *val = swab32(tmp);
3222
3223         return 0;
3224 }
3225
3226 #define NVRAM_CMD_TIMEOUT 5000
3227
3228 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3229 {
3230         int i;
3231
3232         tw32(NVRAM_CMD, nvram_cmd);
3233         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3234                 usleep_range(10, 40);
3235                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3236                         udelay(10);
3237                         break;
3238                 }
3239         }
3240
3241         if (i == NVRAM_CMD_TIMEOUT)
3242                 return -EBUSY;
3243
3244         return 0;
3245 }
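
/* Example use (taken from tg3_nvram_read() below): program the address,
 * kick off a one-shot read, then fetch the data on success:
 *
 *	tw32(NVRAM_ADDR, offset);
 *	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 *			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
 *	if (ret == 0)
 *		val = tr32(NVRAM_RDDATA);
 */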
3246
3247 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3248 {
3249         if (tg3_flag(tp, NVRAM) &&
3250             tg3_flag(tp, NVRAM_BUFFERED) &&
3251             tg3_flag(tp, FLASH) &&
3252             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3253             (tp->nvram_jedecnum == JEDEC_ATMEL))
3254
3255                 addr = ((addr / tp->nvram_pagesize) <<
3256                         ATMEL_AT45DB0X1B_PAGE_POS) +
3257                        (addr % tp->nvram_pagesize);
3258
3259         return addr;
3260 }
3261
3262 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3263 {
3264         if (tg3_flag(tp, NVRAM) &&
3265             tg3_flag(tp, NVRAM_BUFFERED) &&
3266             tg3_flag(tp, FLASH) &&
3267             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3268             (tp->nvram_jedecnum == JEDEC_ATMEL))
3269
3270                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3271                         tp->nvram_pagesize) +
3272                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3273
3274         return addr;
3275 }
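
/* Worked example of the Atmel translation (hypothetical numbers, assuming
 * ATMEL_AT45DB0X1B_PAGE_POS == 9 and the usual 264-byte nvram_pagesize):
 * logical addr 1000 sits in page 3 at byte 208, so the physical form is
 * (3 << 9) + 208 = 1744; tg3_nvram_logical_addr() inverts this,
 * (1744 >> 9) * 264 + (1744 & 511) = 792 + 208 = 1000.
 */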
3276
3277 /* NOTE: Data read in from NVRAM is byteswapped according to
3278  * the byteswapping settings for all other register accesses.
3279  * tg3 devices are BE devices, so on a BE machine, the data
3280  * returned will be exactly as it is seen in NVRAM.  On a LE
3281  * machine, the 32-bit value will be byteswapped.
3282  */
3283 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3284 {
3285         int ret;
3286
3287         if (!tg3_flag(tp, NVRAM))
3288                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3289
3290         offset = tg3_nvram_phys_addr(tp, offset);
3291
3292         if (offset > NVRAM_ADDR_MSK)
3293                 return -EINVAL;
3294
3295         ret = tg3_nvram_lock(tp);
3296         if (ret)
3297                 return ret;
3298
3299         tg3_enable_nvram_access(tp);
3300
3301         tw32(NVRAM_ADDR, offset);
3302         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3303                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3304
3305         if (ret == 0)
3306                 *val = tr32(NVRAM_RDDATA);
3307
3308         tg3_disable_nvram_access(tp);
3309
3310         tg3_nvram_unlock(tp);
3311
3312         return ret;
3313 }
3314
3315 /* Ensures NVRAM data is in bytestream format. */
3316 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3317 {
3318         u32 v;
3319         int res = tg3_nvram_read(tp, offset, &v);
3320         if (!res)
3321                 *val = cpu_to_be32(v);
3322         return res;
3323 }
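
/* Example: if NVRAM holds the bytes 0x12 0x34 0x56 0x78 at 'offset',
 * tg3_nvram_read() returns 0x12345678 on a big-endian host but
 * 0x78563412 on a little-endian one, while tg3_nvram_read_be32()
 * stores a __be32 whose in-memory bytes are 0x12 0x34 0x56 0x78 on
 * either host, i.e. a true bytestream.
 */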
3324
3325 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3326                                     u32 offset, u32 len, u8 *buf)
3327 {
3328         int i, j, rc = 0;
3329         u32 val;
3330
3331         for (i = 0; i < len; i += 4) {
3332                 u32 addr;
3333                 __be32 data;
3334
3335                 addr = offset + i;
3336
3337                 memcpy(&data, buf + i, 4);
3338
3339                 /*
3340                  * The SEEPROM interface expects the data to always be opposite
3341                  * the native endian format.  We accomplish this by reversing
3342                  * all the operations that would have been performed on the
3343                  * data from a call to tg3_nvram_read_be32().
3344                  */
3345                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3346
3347                 val = tr32(GRC_EEPROM_ADDR);
3348                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3349
3350                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3351                         EEPROM_ADDR_READ);
3352                 tw32(GRC_EEPROM_ADDR, val |
3353                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3354                         (addr & EEPROM_ADDR_ADDR_MASK) |
3355                         EEPROM_ADDR_START |
3356                         EEPROM_ADDR_WRITE);
3357
3358                 for (j = 0; j < 1000; j++) {
3359                         val = tr32(GRC_EEPROM_ADDR);
3360
3361                         if (val & EEPROM_ADDR_COMPLETE)
3362                                 break;
3363                         msleep(1);
3364                 }
3365                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3366                         rc = -EBUSY;
3367                         break;
3368                 }
3369         }
3370
3371         return rc;
3372 }
3373
3374 /* offset and length are dword aligned */
3375 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3376                 u8 *buf)
3377 {
3378         int ret = 0;
3379         u32 pagesize = tp->nvram_pagesize;
3380         u32 pagemask = pagesize - 1;
3381         u32 nvram_cmd;
3382         u8 *tmp;
3383
3384         tmp = kmalloc(pagesize, GFP_KERNEL);
3385         if (tmp == NULL)
3386                 return -ENOMEM;
3387
3388         while (len) {
3389                 int j;
3390                 u32 phy_addr, page_off, size;
3391
3392                 phy_addr = offset & ~pagemask;
3393
3394                 for (j = 0; j < pagesize; j += 4) {
3395                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3396                                                   (__be32 *) (tmp + j));
3397                         if (ret)
3398                                 break;
3399                 }
3400                 if (ret)
3401                         break;
3402
3403                 page_off = offset & pagemask;
3404                 size = pagesize;
3405                 if (len < size)
3406                         size = len;
3407
3408                 len -= size;
3409
3410                 memcpy(tmp + page_off, buf, size);
3411
3412                 offset = offset + (pagesize - page_off);
3413
3414                 tg3_enable_nvram_access(tp);
3415
3416                 /*
3417                  * Before we can erase the flash page, we need
3418                  * to issue a special "write enable" command.
3419                  */
3420                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3421
3422                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3423                         break;
3424
3425                 /* Erase the target page */
3426                 tw32(NVRAM_ADDR, phy_addr);
3427
3428                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3429                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3430
3431                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3432                         break;
3433
3434                 /* Issue another write enable to start the write. */
3435                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3436
3437                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3438                         break;
3439
3440                 for (j = 0; j < pagesize; j += 4) {
3441                         __be32 data;
3442
3443                         data = *((__be32 *) (tmp + j));
3444
3445                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3446
3447                         tw32(NVRAM_ADDR, phy_addr + j);
3448
3449                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3450                                 NVRAM_CMD_WR;
3451
3452                         if (j == 0)
3453                                 nvram_cmd |= NVRAM_CMD_FIRST;
3454                         else if (j == (pagesize - 4))
3455                                 nvram_cmd |= NVRAM_CMD_LAST;
3456
3457                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3458                         if (ret)
3459                                 break;
3460                 }
3461                 if (ret)
3462                         break;
3463         }
3464
3465         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3466         tg3_nvram_exec_cmd(tp, nvram_cmd);
3467
3468         kfree(tmp);
3469
3470         return ret;
3471 }
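
/* The unbuffered path above is a classic flash read-modify-write cycle:
 * read the whole page into 'tmp', splice the caller's bytes in at
 * page_off, issue a write-enable (WREN) followed by a page ERASE, issue
 * WREN again, stream the page back one dword at a time with FIRST/LAST
 * framing, and finally drop write enable with WRDI.
 */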
3472
3473 /* offset and length are dword aligned */
3474 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3475                 u8 *buf)
3476 {
3477         int i, ret = 0;
3478
3479         for (i = 0; i < len; i += 4, offset += 4) {
3480                 u32 page_off, phy_addr, nvram_cmd;
3481                 __be32 data;
3482
3483                 memcpy(&data, buf + i, 4);
3484                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3485
3486                 page_off = offset % tp->nvram_pagesize;
3487
3488                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3489
3490                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3491
3492                 if (page_off == 0 || i == 0)
3493                         nvram_cmd |= NVRAM_CMD_FIRST;
3494                 if (page_off == (tp->nvram_pagesize - 4))
3495                         nvram_cmd |= NVRAM_CMD_LAST;
3496
3497                 if (i == (len - 4))
3498                         nvram_cmd |= NVRAM_CMD_LAST;
3499
3500                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3501                     !tg3_flag(tp, FLASH) ||
3502                     !tg3_flag(tp, 57765_PLUS))
3503                         tw32(NVRAM_ADDR, phy_addr);
3504
3505                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3506                     !tg3_flag(tp, 5755_PLUS) &&
3507                     (tp->nvram_jedecnum == JEDEC_ST) &&
3508                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3509                         u32 cmd;
3510
3511                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3512                         ret = tg3_nvram_exec_cmd(tp, cmd);
3513                         if (ret)
3514                                 break;
3515                 }
3516                 if (!tg3_flag(tp, FLASH)) {
3517                         /* We always do complete word writes to the EEPROM. */
3518                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3519                 }
3520
3521                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3522                 if (ret)
3523                         break;
3524         }
3525         return ret;
3526 }
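
/* Example of the FIRST/LAST framing above: a 16-byte write starting on a
 * page boundary (with pagesize > 16) tags dword 0 with NVRAM_CMD_FIRST
 * (page_off == 0 and i == 0), leaves dwords 1 and 2 untagged, and tags
 * dword 3 with NVRAM_CMD_LAST because i == len - 4.
 */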
3527
3528 /* offset and length are dword aligned */
3529 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3530 {
3531         int ret;
3532
3533         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3534                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3535                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3536                 udelay(40);
3537         }
3538
3539         if (!tg3_flag(tp, NVRAM)) {
3540                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3541         } else {
3542                 u32 grc_mode;
3543
3544                 ret = tg3_nvram_lock(tp);
3545                 if (ret)
3546                         return ret;
3547
3548                 tg3_enable_nvram_access(tp);
3549                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3550                         tw32(NVRAM_WRITE1, 0x406);
3551
3552                 grc_mode = tr32(GRC_MODE);
3553                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3554
3555                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3556                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3557                                 buf);
3558                 } else {
3559                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3560                                 buf);
3561                 }
3562
3563                 grc_mode = tr32(GRC_MODE);
3564                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3565
3566                 tg3_disable_nvram_access(tp);
3567                 tg3_nvram_unlock(tp);
3568         }
3569
3570         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3571                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3572                 udelay(40);
3573         }
3574
3575         return ret;
3576 }
3577
3578 #define RX_CPU_SCRATCH_BASE     0x30000
3579 #define RX_CPU_SCRATCH_SIZE     0x04000
3580 #define TX_CPU_SCRATCH_BASE     0x34000
3581 #define TX_CPU_SCRATCH_SIZE     0x04000
3582
3583 /* tp->lock is held. */
3584 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3585 {
3586         int i;
3587         const int iters = 10000;
3588
3589         for (i = 0; i < iters; i++) {
3590                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3591                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3592                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3593                         break;
3594                 if (pci_channel_offline(tp->pdev))
3595                         return -EBUSY;
3596         }
3597
3598         return (i == iters) ? -EBUSY : 0;
3599 }
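
/* A bounded poll: each iteration re-asserts the halt request and
 * re-reads CPU_MODE, bailing out early if the PCI channel has gone
 * offline (e.g. during PCI error recovery).
 */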
3600
3601 /* tp->lock is held. */
3602 static int tg3_rxcpu_pause(struct tg3 *tp)
3603 {
3604         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3605
3606         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3607         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3608         udelay(10);
3609
3610         return rc;
3611 }
3612
3613 /* tp->lock is held. */
3614 static int tg3_txcpu_pause(struct tg3 *tp)
3615 {
3616         return tg3_pause_cpu(tp, TX_CPU_BASE);
3617 }
3618
3619 /* tp->lock is held. */
3620 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3621 {
3622         tw32(cpu_base + CPU_STATE, 0xffffffff);
3623         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3624 }
3625
3626 /* tp->lock is held. */
3627 static void tg3_rxcpu_resume(struct tg3 *tp)
3628 {
3629         tg3_resume_cpu(tp, RX_CPU_BASE);
3630 }
3631
3632 /* tp->lock is held. */
3633 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3634 {
3635         int rc;
3636
3637         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3638
3639         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3640                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3641
3642                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3643                 return 0;
3644         }
3645         if (cpu_base == RX_CPU_BASE) {
3646                 rc = tg3_rxcpu_pause(tp);
3647         } else {
3648                 /*
3649                  * There is only an Rx CPU for the 5750 derivative in the
3650                  * BCM4785.
3651                  */
3652                 if (tg3_flag(tp, IS_SSB_CORE))
3653                         return 0;
3654
3655                 rc = tg3_txcpu_pause(tp);
3656         }
3657
3658         if (rc) {
3659                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3660                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3661                 return -ENODEV;
3662         }
3663
3664         /* Clear firmware's nvram arbitration. */
3665         if (tg3_flag(tp, NVRAM))
3666                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3667         return 0;
3668 }
3669
3670 static int tg3_fw_data_len(struct tg3 *tp,
3671                            const struct tg3_firmware_hdr *fw_hdr)
3672 {
3673         int fw_len;
3674
3675         /* Non-fragmented firmware has one firmware header followed by a
3676          * contiguous chunk of data to be written. The length field in that
3677          * header is not the length of the data to be written but the complete
3678          * length of the bss. The data length is therefore determined from
3679          * tp->fw->size minus the header.
3680          *
3681          * Fragmented firmware has a main header followed by multiple
3682          * fragments. Each fragment is identical to non-fragmented firmware:
3683          * a firmware header followed by a contiguous chunk of data. In
3684          * the main header, the length field is unused and set to 0xffffffff.
3685          * In each fragment header the length is the entire size of that
3686          * fragment, i.e. fragment data + header length. The data length is
3687          * therefore the length field in the header minus TG3_FW_HDR_LEN.
3688          */
3689         if (tp->fw_len == 0xffffffff)
3690                 fw_len = be32_to_cpu(fw_hdr->len);
3691         else
3692                 fw_len = tp->fw->size;
3693
3694         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3695 }
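
/* Worked example (hypothetical sizes, assuming TG3_FW_HDR_LEN == 12 for
 * the three be32 header words version/base_addr/len): a non-fragmented
 * image with tp->fw->size == 4096 yields (4096 - 12) / 4 = 1021 data
 * words regardless of the bss-inclusive header length, while a fragment
 * whose header says len == 512 yields (512 - 12) / 4 = 125 words.
 */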
3696
3697 /* tp->lock is held. */
3698 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3699                                  u32 cpu_scratch_base, int cpu_scratch_size,
3700                                  const struct tg3_firmware_hdr *fw_hdr)
3701 {
3702         int err, i;
3703         void (*write_op)(struct tg3 *, u32, u32);
3704         int total_len = tp->fw->size;
3705
3706         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3707                 netdev_err(tp->dev,
3708                            "%s: Trying to load TX cpu firmware on a 5705-class chip\n",
3709                            __func__);
3710                 return -EINVAL;
3711         }
3712
3713         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3714                 write_op = tg3_write_mem;
3715         else
3716                 write_op = tg3_write_indirect_reg32;
3717
3718         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3719                 /* It is possible that bootcode is still loading at this point.
3720                  * Take the nvram lock before halting the cpu.
3721                  */
3722                 int lock_err = tg3_nvram_lock(tp);
3723                 err = tg3_halt_cpu(tp, cpu_base);
3724                 if (!lock_err)
3725                         tg3_nvram_unlock(tp);
3726                 if (err)
3727                         goto out;
3728
3729                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3730                         write_op(tp, cpu_scratch_base + i, 0);
3731                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3732                 tw32(cpu_base + CPU_MODE,
3733                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3734         } else {
3735                 /* Subtract additional main header for fragmented firmware and
3736                  * advance to the first fragment.
3737                  */
3738                 total_len -= TG3_FW_HDR_LEN;
3739                 fw_hdr++;
3740         }
3741
3742         do {
3743                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3744                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3745                         write_op(tp, cpu_scratch_base +
3746                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3747                                      (i * sizeof(u32)),
3748                                  be32_to_cpu(fw_data[i]));
3749
3750                 total_len -= be32_to_cpu(fw_hdr->len);
3751
3752                 /* Advance to next fragment */
3753                 fw_hdr = (struct tg3_firmware_hdr *)
3754                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3755         } while (total_len > 0);
3756
3757         err = 0;
3758
3759 out:
3760         return err;
3761 }
3762
3763 /* tp->lock is held. */
3764 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3765 {
3766         int i;
3767         const int iters = 5;
3768
3769         tw32(cpu_base + CPU_STATE, 0xffffffff);
3770         tw32_f(cpu_base + CPU_PC, pc);
3771
3772         for (i = 0; i < iters; i++) {
3773                 if (tr32(cpu_base + CPU_PC) == pc)
3774                         break;
3775                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3776                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3777                 tw32_f(cpu_base + CPU_PC, pc);
3778                 udelay(1000);
3779         }
3780
3781         return (i == iters) ? -EBUSY : 0;
3782 }
3783
3784 /* tp->lock is held. */
3785 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3786 {
3787         const struct tg3_firmware_hdr *fw_hdr;
3788         int err;
3789
3790         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3791
3792         /* The firmware blob starts with version numbers, followed by
3793          * start address and length. The length field holds the complete
3794          * length: end_address_of_bss - start_address_of_text. The
3795          * remainder is the blob to be loaded contiguously from the
3796          * start address. */
3797
3798         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3799                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3800                                     fw_hdr);
3801         if (err)
3802                 return err;
3803
3804         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3805                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3806                                     fw_hdr);
3807         if (err)
3808                 return err;
3809
3810         /* Now start up only the RX cpu. */
3811         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3812                                        be32_to_cpu(fw_hdr->base_addr));
3813         if (err) {
3814                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3815                            "should be %08x\n", __func__,
3816                            tr32(RX_CPU_BASE + CPU_PC),
3817                            be32_to_cpu(fw_hdr->base_addr));
3818                 return -ENODEV;
3819         }
3820
3821         tg3_rxcpu_resume(tp);
3822
3823         return 0;
3824 }
3825
3826 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3827 {
3828         const int iters = 1000;
3829         int i;
3830         u32 val;
3831
3832         /* Wait for boot code to complete initialization and enter service
3833          * loop. It is then safe to download service patches.
3834          */
3835         for (i = 0; i < iters; i++) {
3836                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3837                         break;
3838
3839                 udelay(10);
3840         }
3841
3842         if (i == iters) {
3843                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3844                 return -EBUSY;
3845         }
3846
3847         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3848         if (val & 0xff) {
3849                 netdev_warn(tp->dev,
3850                             "Other patches exist. Not downloading EEE patch\n");
3851                 return -EEXIST;
3852         }
3853
3854         return 0;
3855 }
3856
3857 /* tp->lock is held. */
3858 static void tg3_load_57766_firmware(struct tg3 *tp)
3859 {
3860         struct tg3_firmware_hdr *fw_hdr;
3861
3862         if (!tg3_flag(tp, NO_NVRAM))
3863                 return;
3864
3865         if (tg3_validate_rxcpu_state(tp))
3866                 return;
3867
3868         if (!tp->fw)
3869                 return;
3870
3871         /* This firmware blob has a different format from older firmware
3872          * releases, as described below. The main difference is that the
3873          * data is fragmented and written to non-contiguous locations.
3874          *
3875          * It begins with a firmware header identical to other firmware,
3876          * consisting of version, base addr and length. The length here is
3877          * unused and set to 0xffffffff.
3878          *
3879          * This is followed by a series of firmware fragments, each
3880          * individually identical to older firmware images, i.e. a
3881          * firmware header followed by the data for that fragment. The
3882          * version field of the individual fragment header is unused.
3883          */
3884
3885         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3886         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3887                 return;
3888
3889         if (tg3_rxcpu_pause(tp))
3890                 return;
3891
3892         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3893         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3894
3895         tg3_rxcpu_resume(tp);
3896 }
3897
3898 /* tp->lock is held. */
3899 static int tg3_load_tso_firmware(struct tg3 *tp)
3900 {
3901         const struct tg3_firmware_hdr *fw_hdr;
3902         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3903         int err;
3904
3905         if (!tg3_flag(tp, FW_TSO))
3906                 return 0;
3907
3908         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3909
3910         /* The firmware blob starts with version numbers, followed by
3911          * start address and length. The length field holds the complete
3912          * length: end_address_of_bss - start_address_of_text. The
3913          * remainder is the blob to be loaded contiguously from the
3914          * start address. */
3915
3916         cpu_scratch_size = tp->fw_len;
3917
3918         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3919                 cpu_base = RX_CPU_BASE;
3920                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3921         } else {
3922                 cpu_base = TX_CPU_BASE;
3923                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3924                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3925         }
3926
3927         err = tg3_load_firmware_cpu(tp, cpu_base,
3928                                     cpu_scratch_base, cpu_scratch_size,
3929                                     fw_hdr);
3930         if (err)
3931                 return err;
3932
3933         /* Now start up the cpu. */
3934         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3935                                        be32_to_cpu(fw_hdr->base_addr));
3936         if (err) {
3937                 netdev_err(tp->dev,
3938                            "%s fails to set CPU PC, is %08x should be %08x\n",
3939                            __func__, tr32(cpu_base + CPU_PC),
3940                            be32_to_cpu(fw_hdr->base_addr));
3941                 return -ENODEV;
3942         }
3943
3944         tg3_resume_cpu(tp, cpu_base);
3945         return 0;
3946 }
3947
3948 /* tp->lock is held. */
3949 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3950 {
3951         u32 addr_high, addr_low;
3952
3953         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3954         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3955                     (mac_addr[4] <<  8) | mac_addr[5]);
3956
3957         if (index < 4) {
3958                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3959                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3960         } else {
3961                 index -= 4;
3962                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3963                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3964         }
3965 }
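
/* Example: for MAC address 00:11:22:33:44:55, addr_high = 0x00000011 and
 * addr_low = 0x22334455. Indices 0-3 land in the MAC_ADDR_x register
 * pairs and indices 4-15 in the MAC_EXTADDR_x pairs.
 */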
3966
3967 /* tp->lock is held. */
3968 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3969 {
3970         u32 addr_high;
3971         int i;
3972
3973         for (i = 0; i < 4; i++) {
3974                 if (i == 1 && skip_mac_1)
3975                         continue;
3976                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3977         }
3978
3979         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3980             tg3_asic_rev(tp) == ASIC_REV_5704) {
3981                 for (i = 4; i < 16; i++)
3982                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3983         }
3984
3985         addr_high = (tp->dev->dev_addr[0] +
3986                      tp->dev->dev_addr[1] +
3987                      tp->dev->dev_addr[2] +
3988                      tp->dev->dev_addr[3] +
3989                      tp->dev->dev_addr[4] +
3990                      tp->dev->dev_addr[5]) &
3991                 TX_BACKOFF_SEED_MASK;
3992         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3993 }
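
/* Example: MAC address 00:11:22:33:44:55 sums to 0xff, which is then
 * masked with TX_BACKOFF_SEED_MASK before being written to
 * MAC_TX_BACKOFF_SEED as the transmit backoff seed.
 */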
3994
3995 static void tg3_enable_register_access(struct tg3 *tp)
3996 {
3997         /*
3998          * Make sure register accesses (indirect or otherwise) will function
3999          * correctly.
4000          */
4001         pci_write_config_dword(tp->pdev,
4002                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4003 }
4004
4005 static int tg3_power_up(struct tg3 *tp)
4006 {
4007         int err;
4008
4009         tg3_enable_register_access(tp);
4010
4011         err = pci_set_power_state(tp->pdev, PCI_D0);
4012         if (!err) {
4013                 /* Switch out of Vaux if it is a NIC */
4014                 tg3_pwrsrc_switch_to_vmain(tp);
4015         } else {
4016                 netdev_err(tp->dev, "Transition to D0 failed\n");
4017         }
4018
4019         return err;
4020 }
4021
4022 static int tg3_setup_phy(struct tg3 *, bool);
4023
4024 static int tg3_power_down_prepare(struct tg3 *tp)
4025 {
4026         u32 misc_host_ctrl;
4027         bool device_should_wake, do_low_power;
4028
4029         tg3_enable_register_access(tp);
4030
4031         /* Restore the CLKREQ setting. */
4032         if (tg3_flag(tp, CLKREQ_BUG))
4033                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4034                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4035
4036         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4037         tw32(TG3PCI_MISC_HOST_CTRL,
4038              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4039
4040         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4041                              tg3_flag(tp, WOL_ENABLE);
4042
4043         if (tg3_flag(tp, USE_PHYLIB)) {
4044                 do_low_power = false;
4045                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4046                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4047                         struct phy_device *phydev;
4048                         u32 phyid, advertising;
4049
4050                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4051
4052                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4053
4054                         tp->link_config.speed = phydev->speed;
4055                         tp->link_config.duplex = phydev->duplex;
4056                         tp->link_config.autoneg = phydev->autoneg;
4057                         tp->link_config.advertising = phydev->advertising;
4058
4059                         advertising = ADVERTISED_TP |
4060                                       ADVERTISED_Pause |
4061                                       ADVERTISED_Autoneg |
4062                                       ADVERTISED_10baseT_Half;
4063
4064                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4065                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4066                                         advertising |=
4067                                                 ADVERTISED_100baseT_Half |
4068                                                 ADVERTISED_100baseT_Full |
4069                                                 ADVERTISED_10baseT_Full;
4070                                 else
4071                                         advertising |= ADVERTISED_10baseT_Full;
4072                         }
4073
4074                         phydev->advertising = advertising;
4075
4076                         phy_start_aneg(phydev);
4077
4078                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4079                         if (phyid != PHY_ID_BCMAC131) {
4080                                 phyid &= PHY_BCM_OUI_MASK;
4081                                 if (phyid == PHY_BCM_OUI_1 ||
4082                                     phyid == PHY_BCM_OUI_2 ||
4083                                     phyid == PHY_BCM_OUI_3)
4084                                         do_low_power = true;
4085                         }
4086                 }
4087         } else {
4088                 do_low_power = true;
4089
4090                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4091                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4092
4093                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4094                         tg3_setup_phy(tp, false);
4095         }
4096
4097         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4098                 u32 val;
4099
4100                 val = tr32(GRC_VCPU_EXT_CTRL);
4101                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4102         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4103                 int i;
4104                 u32 val;
4105
4106                 for (i = 0; i < 200; i++) {
4107                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4108                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4109                                 break;
4110                         msleep(1);
4111                 }
4112         }
4113         if (tg3_flag(tp, WOL_CAP))
4114                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4115                                                      WOL_DRV_STATE_SHUTDOWN |
4116                                                      WOL_DRV_WOL |
4117                                                      WOL_SET_MAGIC_PKT);
4118
4119         if (device_should_wake) {
4120                 u32 mac_mode;
4121
4122                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4123                         if (do_low_power &&
4124                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4125                                 tg3_phy_auxctl_write(tp,
4126                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4127                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4128                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4129                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4130                                 udelay(40);
4131                         }
4132
4133                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4134                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4135                         else if (tp->phy_flags &
4136                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4137                                 if (tp->link_config.active_speed == SPEED_1000)
4138                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4139                                 else
4140                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4141                         } else
4142                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4143
4144                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4145                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4146                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4147                                              SPEED_100 : SPEED_10;
4148                                 if (tg3_5700_link_polarity(tp, speed))
4149                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4150                                 else
4151                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4152                         }
4153                 } else {
4154                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4155                 }
4156
4157                 if (!tg3_flag(tp, 5750_PLUS))
4158                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4159
4160                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4161                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4162                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4163                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4164
4165                 if (tg3_flag(tp, ENABLE_APE))
4166                         mac_mode |= MAC_MODE_APE_TX_EN |
4167                                     MAC_MODE_APE_RX_EN |
4168                                     MAC_MODE_TDE_ENABLE;
4169
4170                 tw32_f(MAC_MODE, mac_mode);
4171                 udelay(100);
4172
4173                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4174                 udelay(10);
4175         }
4176
4177         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4178             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4179              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4180                 u32 base_val;
4181
4182                 base_val = tp->pci_clock_ctrl;
4183                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4184                              CLOCK_CTRL_TXCLK_DISABLE);
4185
4186                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4187                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4188         } else if (tg3_flag(tp, 5780_CLASS) ||
4189                    tg3_flag(tp, CPMU_PRESENT) ||
4190                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4191                 /* do nothing */
4192         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4193                 u32 newbits1, newbits2;
4194
4195                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4196                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4197                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4198                                     CLOCK_CTRL_TXCLK_DISABLE |
4199                                     CLOCK_CTRL_ALTCLK);
4200                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4201                 } else if (tg3_flag(tp, 5705_PLUS)) {
4202                         newbits1 = CLOCK_CTRL_625_CORE;
4203                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4204                 } else {
4205                         newbits1 = CLOCK_CTRL_ALTCLK;
4206                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4207                 }
4208
4209                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4210                             40);
4211
4212                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4213                             40);
4214
4215                 if (!tg3_flag(tp, 5705_PLUS)) {
4216                         u32 newbits3;
4217
4218                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4219                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4220                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4221                                             CLOCK_CTRL_TXCLK_DISABLE |
4222                                             CLOCK_CTRL_44MHZ_CORE);
4223                         } else {
4224                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4225                         }
4226
4227                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4228                                     tp->pci_clock_ctrl | newbits3, 40);
4229                 }
4230         }
4231
4232         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4233                 tg3_power_down_phy(tp, do_low_power);
4234
4235         tg3_frob_aux_power(tp, true);
4236
4237         /* Workaround for unstable PLL clock */
4238         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4239             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4240              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4241                 u32 val = tr32(0x7d00);
4242
4243                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4244                 tw32(0x7d00, val);
4245                 if (!tg3_flag(tp, ENABLE_ASF)) {
4246                         int err;
4247
4248                         err = tg3_nvram_lock(tp);
4249                         tg3_halt_cpu(tp, RX_CPU_BASE);
4250                         if (!err)
4251                                 tg3_nvram_unlock(tp);
4252                 }
4253         }
4254
4255         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4256
4257         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4258
4259         return 0;
4260 }
4261
4262 static void tg3_power_down(struct tg3 *tp)
4263 {
4264         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4265         pci_set_power_state(tp->pdev, PCI_D3hot);
4266 }
4267
4268 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4269 {
4270         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4271         case MII_TG3_AUX_STAT_10HALF:
4272                 *speed = SPEED_10;
4273                 *duplex = DUPLEX_HALF;
4274                 break;
4275
4276         case MII_TG3_AUX_STAT_10FULL:
4277                 *speed = SPEED_10;
4278                 *duplex = DUPLEX_FULL;
4279                 break;
4280
4281         case MII_TG3_AUX_STAT_100HALF:
4282                 *speed = SPEED_100;
4283                 *duplex = DUPLEX_HALF;
4284                 break;
4285
4286         case MII_TG3_AUX_STAT_100FULL:
4287                 *speed = SPEED_100;
4288                 *duplex = DUPLEX_FULL;
4289                 break;
4290
4291         case MII_TG3_AUX_STAT_1000HALF:
4292                 *speed = SPEED_1000;
4293                 *duplex = DUPLEX_HALF;
4294                 break;
4295
4296         case MII_TG3_AUX_STAT_1000FULL:
4297                 *speed = SPEED_1000;
4298                 *duplex = DUPLEX_FULL;
4299                 break;
4300
4301         default:
4302                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4303                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4304                                  SPEED_10;
4305                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4306                                   DUPLEX_HALF;
4307                         break;
4308                 }
4309                 *speed = SPEED_UNKNOWN;
4310                 *duplex = DUPLEX_UNKNOWN;
4311                 break;
4312         }
4313 }
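
/* Example: a val with MII_TG3_AUX_STAT_1000FULL set decodes to
 * SPEED_1000/DUPLEX_FULL. Unrecognized encodings fall back to the
 * MII_TG3_AUX_STAT_100/_FULL bits on FET PHYs and to
 * SPEED_UNKNOWN/DUPLEX_UNKNOWN everywhere else.
 */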
4314
4315 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4316 {
4317         int err = 0;
4318         u32 val, new_adv;
4319
4320         new_adv = ADVERTISE_CSMA;
4321         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4322         new_adv |= mii_advertise_flowctrl(flowctrl);
4323
4324         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4325         if (err)
4326                 goto done;
4327
4328         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4329                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4330
4331                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4332                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4333                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4334
4335                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4336                 if (err)
4337                         goto done;
4338         }
4339
4340         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4341                 goto done;
4342
4343         tw32(TG3_CPMU_EEE_MODE,
4344              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4345
4346         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4347         if (!err) {
4348                 u32 err2;
4349
4350                 val = 0;
4351                 /* Advertise 100-BaseTX EEE ability */
4352                 if (advertise & ADVERTISED_100baseT_Full)
4353                         val |= MDIO_AN_EEE_ADV_100TX;
4354                 /* Advertise 1000-BaseT EEE ability */
4355                 if (advertise & ADVERTISED_1000baseT_Full)
4356                         val |= MDIO_AN_EEE_ADV_1000T;
4357
4358                 if (!tp->eee.eee_enabled) {
4359                         val = 0;
4360                         tp->eee.advertised = 0;
4361                 } else {
4362                         tp->eee.advertised = advertise &
4363                                              (ADVERTISED_100baseT_Full |
4364                                               ADVERTISED_1000baseT_Full);
4365                 }
4366
4367                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4368                 if (err)
4369                         val = 0;
4370
4371                 switch (tg3_asic_rev(tp)) {
4372                 case ASIC_REV_5717:
4373                 case ASIC_REV_57765:
4374                 case ASIC_REV_57766:
4375                 case ASIC_REV_5719:
4376                         /* If we advertised any EEE capabilities above... */
4377                         if (val)
4378                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4379                                       MII_TG3_DSP_TAP26_RMRXSTO |
4380                                       MII_TG3_DSP_TAP26_OPCSINPT;
4381                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4382                         /* Fall through */
4383                 case ASIC_REV_5720:
4384                 case ASIC_REV_5762:
4385                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4386                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4387                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4388                 }
4389
4390                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4391                 if (!err)
4392                         err = err2;
4393         }
4394
4395 done:
4396         return err;
4397 }
4398
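/* Begin link bring-up on a copper PHY.  With autoneg enabled (or in
 * low-power/WOL states) this programs the advertisements and restarts
 * autonegotiation; otherwise it forces the configured speed/duplex,
 * first dropping any existing link via BMCR_LOOPBACK before writing
 * the forced BMCR value.
 */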
4399 static void tg3_phy_copper_begin(struct tg3 *tp)
4400 {
4401         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4402             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4403                 u32 adv, fc;
4404
4405                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4406                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4407                         adv = ADVERTISED_10baseT_Half |
4408                               ADVERTISED_10baseT_Full;
4409                         if (tg3_flag(tp, WOL_SPEED_100MB))
4410                                 adv |= ADVERTISED_100baseT_Half |
4411                                        ADVERTISED_100baseT_Full;
4412                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4413                                 if (!(tp->phy_flags &
4414                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4415                                         adv |= ADVERTISED_1000baseT_Half;
4416                                 adv |= ADVERTISED_1000baseT_Full;
4417                         }
4418
4419                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4420                 } else {
4421                         adv = tp->link_config.advertising;
4422                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4423                                 adv &= ~(ADVERTISED_1000baseT_Half |
4424                                          ADVERTISED_1000baseT_Full);
4425
4426                         fc = tp->link_config.flowctrl;
4427                 }
4428
4429                 tg3_phy_autoneg_cfg(tp, adv, fc);
4430
4431                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4432                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4433                         /* Normally during power down we want to autonegotiate
4434                          * the lowest possible speed for WOL. However, to avoid
4435                          * link flap, we leave the link untouched.
4436                          */
4437                         return;
4438                 }
4439
4440                 tg3_writephy(tp, MII_BMCR,
4441                              BMCR_ANENABLE | BMCR_ANRESTART);
4442         } else {
4443                 int i;
4444                 u32 bmcr, orig_bmcr;
4445
4446                 tp->link_config.active_speed = tp->link_config.speed;
4447                 tp->link_config.active_duplex = tp->link_config.duplex;
4448
4449                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4450                         /* With autoneg disabled, 5715 only links up when the
4451                          * advertisement register has the configured speed
4452                          * enabled.
4453                          */
4454                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4455                 }
4456
4457                 bmcr = 0;
4458                 switch (tp->link_config.speed) {
4459                 default:
4460                 case SPEED_10:
4461                         break;
4462
4463                 case SPEED_100:
4464                         bmcr |= BMCR_SPEED100;
4465                         break;
4466
4467                 case SPEED_1000:
4468                         bmcr |= BMCR_SPEED1000;
4469                         break;
4470                 }
4471
4472                 if (tp->link_config.duplex == DUPLEX_FULL)
4473                         bmcr |= BMCR_FULLDPLX;
4474
4475                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4476                     (bmcr != orig_bmcr)) {
4477                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4478                         for (i = 0; i < 1500; i++) {
4479                                 u32 tmp;
4480
4481                                 udelay(10);
4482                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4483                                     tg3_readphy(tp, MII_BMSR, &tmp))
4484                                         continue;
4485                                 if (!(tmp & BMSR_LSTATUS)) {
4486                                         udelay(40);
4487                                         break;
4488                                 }
4489                         }
4490                         tg3_writephy(tp, MII_BMCR, bmcr);
4491                         udelay(40);
4492                 }
4493         }
4494 }
4495
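/* Rebuild tp->link_config from the PHY's current register state, so a
 * link that is already configured (presumably by boot firmware) can be
 * described without renegotiating.  Returns -EIO for forced-mode
 * combinations the driver cannot represent, such as a serdes PHY
 * forced to 10 or 100 Mbit/s.
 */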
4496 static int tg3_phy_pull_config(struct tg3 *tp)
4497 {
4498         int err;
4499         u32 val;
4500
4501         err = tg3_readphy(tp, MII_BMCR, &val);
4502         if (err)
4503                 goto done;
4504
4505         if (!(val & BMCR_ANENABLE)) {
4506                 tp->link_config.autoneg = AUTONEG_DISABLE;
4507                 tp->link_config.advertising = 0;
4508                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4509
4510                 err = -EIO;
4511
4512                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4513                 case 0:
4514                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4515                                 goto done;
4516
4517                         tp->link_config.speed = SPEED_10;
4518                         break;
4519                 case BMCR_SPEED100:
4520                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4521                                 goto done;
4522
4523                         tp->link_config.speed = SPEED_100;
4524                         break;
4525                 case BMCR_SPEED1000:
4526                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4527                                 tp->link_config.speed = SPEED_1000;
4528                                 break;
4529                         }
4530                         /* Fall through */
4531                 default:
4532                         goto done;
4533                 }
4534
4535                 if (val & BMCR_FULLDPLX)
4536                         tp->link_config.duplex = DUPLEX_FULL;
4537                 else
4538                         tp->link_config.duplex = DUPLEX_HALF;
4539
4540                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4541
4542                 err = 0;
4543                 goto done;
4544         }
4545
4546         tp->link_config.autoneg = AUTONEG_ENABLE;
4547         tp->link_config.advertising = ADVERTISED_Autoneg;
4548         tg3_flag_set(tp, PAUSE_AUTONEG);
4549
4550         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4551                 u32 adv;
4552
4553                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4554                 if (err)
4555                         goto done;
4556
4557                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4558                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4559
4560                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4561         } else {
4562                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4563         }
4564
4565         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4566                 u32 adv;
4567
4568                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4569                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4570                         if (err)
4571                                 goto done;
4572
4573                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4574                 } else {
4575                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4576                         if (err)
4577                                 goto done;
4578
4579                         adv = tg3_decode_flowctrl_1000X(val);
4580                         tp->link_config.flowctrl = adv;
4581
4582                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4583                         adv = mii_adv_to_ethtool_adv_x(val);
4584                 }
4585
4586                 tp->link_config.advertising |= adv;
4587         }
4588
4589 done:
4590         return err;
4591 }
4592
4593 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4594 {
4595         int err;
4596
4597         /* Turn off tap power management. */
4598         /* Set the extended packet length bit. */
4599         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4600
4601         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4602         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4603         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4604         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4605         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4606
4607         udelay(40);
4608
4609         return err;
4610 }
4611
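/* Compare the EEE configuration the hardware is actually using (pulled
 * via tg3_eee_pull_config()) against the requested settings in
 * tp->eee.  A mismatch means a PHY reset is required before the new
 * settings take effect.
 */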
4612 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4613 {
4614         struct ethtool_eee eee;
4615
4616         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4617                 return true;
4618
4619         tg3_eee_pull_config(tp, &eee);
4620
4621         if (tp->eee.eee_enabled) {
4622                 if (tp->eee.advertised != eee.advertised ||
4623                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4624                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4625                         return false;
4626         } else {
4627                 /* EEE is disabled but we're advertising */
4628                 if (eee.advertised)
4629                         return false;
4630         }
4631
4632         return true;
4633 }
4634
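/* Verify that the PHY's advertisement registers still match
 * tp->link_config.advertising, including the pause bits when running
 * full duplex and the forced-master bits on 5701 A0/B0.  The raw
 * MII_ADVERTISE value is returned through @lcladv for flow-control
 * resolution later in tg3_setup_copper_phy().
 */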
4635 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4636 {
4637         u32 advmsk, tgtadv, advertising;
4638
4639         advertising = tp->link_config.advertising;
4640         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4641
4642         advmsk = ADVERTISE_ALL;
4643         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4644                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4645                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4646         }
4647
4648         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4649                 return false;
4650
4651         if ((*lcladv & advmsk) != tgtadv)
4652                 return false;
4653
4654         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4655                 u32 tg3_ctrl;
4656
4657                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4658
4659                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4660                         return false;
4661
4662                 if (tgtadv &&
4663                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4664                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4665                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4666                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4667                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4668                 } else {
4669                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4670                 }
4671
4672                 if (tg3_ctrl != tgtadv)
4673                         return false;
4674         }
4675
4676         return true;
4677 }
4678
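/* Fetch the link partner's abilities: MII_STAT1000 (gigabit) and
 * MII_LPA are decoded into ethtool form in tp->link_config.rmt_adv,
 * and the raw MII_LPA value is returned through @rmtadv.
 */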
4679 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4680 {
4681         u32 lpeth = 0;
4682
4683         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4684                 u32 val;
4685
4686                 if (tg3_readphy(tp, MII_STAT1000, &val))
4687                         return false;
4688
4689                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4690         }
4691
4692         if (tg3_readphy(tp, MII_LPA, rmtadv))
4693                 return false;
4694
4695         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4696         tp->link_config.rmt_adv = lpeth;
4697
4698         return true;
4699 }
4700
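/* Latch a link state transition: update the carrier, clear the
 * parallel-detect flag when an MII serdes link drops, and log the
 * change.  Returns true if the link state actually changed.
 */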
4701 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4702 {
4703         if (curr_link_up != tp->link_up) {
4704                 if (curr_link_up) {
4705                         netif_carrier_on(tp->dev);
4706                 } else {
4707                         netif_carrier_off(tp->dev);
4708                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4709                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4710                 }
4711
4712                 tg3_link_report(tp);
4713                 return true;
4714         }
4715
4716         return false;
4717 }
4718
4719 static void tg3_clear_mac_status(struct tg3 *tp)
4720 {
4721         tw32(MAC_EVENT, 0);
4722
4723         tw32_f(MAC_STATUS,
4724                MAC_STATUS_SYNC_CHANGED |
4725                MAC_STATUS_CFG_CHANGED |
4726                MAC_STATUS_MI_COMPLETION |
4727                MAC_STATUS_LNKSTATE_CHANGED);
4728         udelay(40);
4729 }
4730
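/* Program the CPMU EEE machinery: link-idle detection sources, the LPI
 * exit timer, the EEE mode word (gated on tp->eee.eee_enabled) and the
 * two debounce timers, including the tx_lpi_timer requested via
 * ethtool.
 */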
4731 static void tg3_setup_eee(struct tg3 *tp)
4732 {
4733         u32 val;
4734
4735         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4736               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4737         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4738                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4739
4740         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4741
4742         tw32_f(TG3_CPMU_EEE_CTRL,
4743                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4744
4745         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4746               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4747               TG3_CPMU_EEEMD_LPI_IN_RX |
4748               TG3_CPMU_EEEMD_EEE_ENABLE;
4749
4750         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4751                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4752
4753         if (tg3_flag(tp, ENABLE_APE))
4754                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4755
4756         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4757
4758         tw32_f(TG3_CPMU_EEE_DBTMR1,
4759                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4760                (tp->eee.tx_lpi_timer & 0xffff));
4761
4762         tw32_f(TG3_CPMU_EEE_DBTMR2,
4763                TG3_CPMU_DBTMR2_APE_TX_2047US |
4764                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4765 }
4766
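/* Work out the state of a copper link and program the MAC to match.
 * The flow is: quiesce MII auto-polling, apply per-chip PHY quirks
 * (5401 DSP init, 5701 A0/B0 CRC workaround), poll BMSR for link,
 * decode speed/duplex from the aux status register, check that the
 * autoneg result still matches our configuration (restarting
 * negotiation via tg3_phy_copper_begin() if not), and finally set up
 * the MAC port mode, LEDs, flow control and link-change reporting.
 */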
4767 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4768 {
4769         bool current_link_up;
4770         u32 bmsr, val;
4771         u32 lcl_adv, rmt_adv;
4772         u16 current_speed;
4773         u8 current_duplex;
4774         int i, err;
4775
4776         tg3_clear_mac_status(tp);
4777
4778         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4779                 tw32_f(MAC_MI_MODE,
4780                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4781                 udelay(80);
4782         }
4783
4784         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4785
4786         /* Some third-party PHYs need to be reset on link going
4787          * down.
4788          */
4789         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4790              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4791              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4792             tp->link_up) {
4793                 tg3_readphy(tp, MII_BMSR, &bmsr);
4794                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4795                     !(bmsr & BMSR_LSTATUS))
4796                         force_reset = true;
4797         }
4798         if (force_reset)
4799                 tg3_phy_reset(tp);
4800
4801         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4802                 tg3_readphy(tp, MII_BMSR, &bmsr);
4803                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4804                     !tg3_flag(tp, INIT_COMPLETE))
4805                         bmsr = 0;
4806
4807                 if (!(bmsr & BMSR_LSTATUS)) {
4808                         err = tg3_init_5401phy_dsp(tp);
4809                         if (err)
4810                                 return err;
4811
4812                         tg3_readphy(tp, MII_BMSR, &bmsr);
4813                         for (i = 0; i < 1000; i++) {
4814                                 udelay(10);
4815                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4816                                     (bmsr & BMSR_LSTATUS)) {
4817                                         udelay(40);
4818                                         break;
4819                                 }
4820                         }
4821
4822                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4823                             TG3_PHY_REV_BCM5401_B0 &&
4824                             !(bmsr & BMSR_LSTATUS) &&
4825                             tp->link_config.active_speed == SPEED_1000) {
4826                                 err = tg3_phy_reset(tp);
4827                                 if (!err)
4828                                         err = tg3_init_5401phy_dsp(tp);
4829                                 if (err)
4830                                         return err;
4831                         }
4832                 }
4833         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4834                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4835                 /* 5701 {A0,B0} CRC bug workaround */
4836                 tg3_writephy(tp, 0x15, 0x0a75);
4837                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4838                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4839                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4840         }
4841
4842         /* Clear pending interrupts... */
4843         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4844         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4845
4846         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4847                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4848         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4849                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4850
4851         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4852             tg3_asic_rev(tp) == ASIC_REV_5701) {
4853                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4854                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4855                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4856                 else
4857                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4858         }
4859
4860         current_link_up = false;
4861         current_speed = SPEED_UNKNOWN;
4862         current_duplex = DUPLEX_UNKNOWN;
4863         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4864         tp->link_config.rmt_adv = 0;
4865
4866         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4867                 err = tg3_phy_auxctl_read(tp,
4868                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4869                                           &val);
4870                 if (!err && !(val & (1 << 10))) {
4871                         tg3_phy_auxctl_write(tp,
4872                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4873                                              val | (1 << 10));
4874                         goto relink;
4875                 }
4876         }
4877
4878         bmsr = 0;
4879         for (i = 0; i < 100; i++) {
4880                 tg3_readphy(tp, MII_BMSR, &bmsr);
4881                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4882                     (bmsr & BMSR_LSTATUS))
4883                         break;
4884                 udelay(40);
4885         }
4886
4887         if (bmsr & BMSR_LSTATUS) {
4888                 u32 aux_stat, bmcr;
4889
4890                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4891                 for (i = 0; i < 2000; i++) {
4892                         udelay(10);
4893                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4894                             aux_stat)
4895                                 break;
4896                 }
4897
4898                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4899                                              &current_speed,
4900                                              &current_duplex);
4901
4902                 bmcr = 0;
4903                 for (i = 0; i < 200; i++) {
4904                         tg3_readphy(tp, MII_BMCR, &bmcr);
4905                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4906                                 continue;
4907                         if (bmcr && bmcr != 0x7fff)
4908                                 break;
4909                         udelay(10);
4910                 }
4911
4912                 lcl_adv = 0;
4913                 rmt_adv = 0;
4914
4915                 tp->link_config.active_speed = current_speed;
4916                 tp->link_config.active_duplex = current_duplex;
4917
4918                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4919                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4920
4921                         if ((bmcr & BMCR_ANENABLE) &&
4922                             eee_config_ok &&
4923                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4924                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4925                                 current_link_up = true;
4926
4927                         /* Changes to the EEE settings take effect only after
4928                          * a PHY reset.  If we skipped a reset because Link
4929                          * Flap Avoidance is enabled, do it now.
4930                          */
4931                         if (!eee_config_ok &&
4932                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4933                             !force_reset) {
4934                                 tg3_setup_eee(tp);
4935                                 tg3_phy_reset(tp);
4936                         }
4937                 } else {
4938                         if (!(bmcr & BMCR_ANENABLE) &&
4939                             tp->link_config.speed == current_speed &&
4940                             tp->link_config.duplex == current_duplex) {
4941                                 current_link_up = true;
4942                         }
4943                 }
4944
4945                 if (current_link_up &&
4946                     tp->link_config.active_duplex == DUPLEX_FULL) {
4947                         u32 reg, bit;
4948
4949                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4950                                 reg = MII_TG3_FET_GEN_STAT;
4951                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4952                         } else {
4953                                 reg = MII_TG3_EXT_STAT;
4954                                 bit = MII_TG3_EXT_STAT_MDIX;
4955                         }
4956
4957                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4958                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4959
4960                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4961                 }
4962         }
4963
4964 relink:
4965         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4966                 tg3_phy_copper_begin(tp);
4967
4968                 if (tg3_flag(tp, ROBOSWITCH)) {
4969                         current_link_up = true;
4970                         /* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4971                         current_speed = SPEED_1000;
4972                         current_duplex = DUPLEX_FULL;
4973                         tp->link_config.active_speed = current_speed;
4974                         tp->link_config.active_duplex = current_duplex;
4975                 }
4976
4977                 tg3_readphy(tp, MII_BMSR, &bmsr);
4978                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4979                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4980                         current_link_up = true;
4981         }
4982
4983         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4984         if (current_link_up) {
4985                 if (tp->link_config.active_speed == SPEED_100 ||
4986                     tp->link_config.active_speed == SPEED_10)
4987                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4988                 else
4989                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4990         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4991                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4992         else
4993                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4994
4995         /* For the 5750 core in the BCM4785 chip to work properly
4996          * in RGMII mode, the LED Control Register must be set up.
4997          */
4998         if (tg3_flag(tp, RGMII_MODE)) {
4999                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5000                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5001
5002                 if (tp->link_config.active_speed == SPEED_10)
5003                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5004                 else if (tp->link_config.active_speed == SPEED_100)
5005                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5006                                      LED_CTRL_100MBPS_ON);
5007                 else if (tp->link_config.active_speed == SPEED_1000)
5008                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5009                                      LED_CTRL_1000MBPS_ON);
5010
5011                 tw32(MAC_LED_CTRL, led_ctrl);
5012                 udelay(40);
5013         }
5014
5015         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5016         if (tp->link_config.active_duplex == DUPLEX_HALF)
5017                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5018
5019         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5020                 if (current_link_up &&
5021                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5022                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5023                 else
5024                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5025         }
5026
5027         /* Without this setting the Netgear GA302T PHY does not
5028          * send/receive packets; the reason is not understood.
5029          */
5030         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5031             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5032                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5033                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5034                 udelay(80);
5035         }
5036
5037         tw32_f(MAC_MODE, tp->mac_mode);
5038         udelay(40);
5039
5040         tg3_phy_eee_adjust(tp, current_link_up);
5041
5042         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5043                 /* Polled via timer. */
5044                 tw32_f(MAC_EVENT, 0);
5045         } else {
5046                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5047         }
5048         udelay(40);
5049
5050         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5051             current_link_up &&
5052             tp->link_config.active_speed == SPEED_1000 &&
5053             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5054                 udelay(120);
5055                 tw32_f(MAC_STATUS,
5056                      (MAC_STATUS_SYNC_CHANGED |
5057                       MAC_STATUS_CFG_CHANGED));
5058                 udelay(40);
5059                 tg3_write_mem(tp,
5060                               NIC_SRAM_FIRMWARE_MBOX,
5061                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5062         }
5063
5064         /* Prevent send BD corruption. */
5065         if (tg3_flag(tp, CLKREQ_BUG)) {
5066                 if (tp->link_config.active_speed == SPEED_100 ||
5067                     tp->link_config.active_speed == SPEED_10)
5068                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5069                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5070                 else
5071                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5072                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5073         }
5074
5075         tg3_test_and_report_link_chg(tp, current_link_up);
5076
5077         return 0;
5078 }
5079
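/* Per-negotiation state for the software fiber autoneg state machine
 * below.  The MR_* flags appear to mirror the management-register
 * variables of the 802.3 autoneg specification, and txconfig/rxconfig
 * hold the raw transmitted/received config words.
 */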
5080 struct tg3_fiber_aneginfo {
5081         int state;
5082 #define ANEG_STATE_UNKNOWN              0
5083 #define ANEG_STATE_AN_ENABLE            1
5084 #define ANEG_STATE_RESTART_INIT         2
5085 #define ANEG_STATE_RESTART              3
5086 #define ANEG_STATE_DISABLE_LINK_OK      4
5087 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5088 #define ANEG_STATE_ABILITY_DETECT       6
5089 #define ANEG_STATE_ACK_DETECT_INIT      7
5090 #define ANEG_STATE_ACK_DETECT           8
5091 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5092 #define ANEG_STATE_COMPLETE_ACK         10
5093 #define ANEG_STATE_IDLE_DETECT_INIT     11
5094 #define ANEG_STATE_IDLE_DETECT          12
5095 #define ANEG_STATE_LINK_OK              13
5096 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5097 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5098
5099         u32 flags;
5100 #define MR_AN_ENABLE            0x00000001
5101 #define MR_RESTART_AN           0x00000002
5102 #define MR_AN_COMPLETE          0x00000004
5103 #define MR_PAGE_RX              0x00000008
5104 #define MR_NP_LOADED            0x00000010
5105 #define MR_TOGGLE_TX            0x00000020
5106 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5107 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5108 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5109 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5110 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5111 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5112 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5113 #define MR_TOGGLE_RX            0x00002000
5114 #define MR_NP_RX                0x00004000
5115
5116 #define MR_LINK_OK              0x80000000
5117
5118         unsigned long link_time, cur_time;
5119
5120         u32 ability_match_cfg;
5121         int ability_match_count;
5122
5123         char ability_match, idle_match, ack_match;
5124
5125         u32 txconfig, rxconfig;
5126 #define ANEG_CFG_NP             0x00000080
5127 #define ANEG_CFG_ACK            0x00000040
5128 #define ANEG_CFG_RF2            0x00000020
5129 #define ANEG_CFG_RF1            0x00000010
5130 #define ANEG_CFG_PS2            0x00000001
5131 #define ANEG_CFG_PS1            0x00008000
5132 #define ANEG_CFG_HD             0x00004000
5133 #define ANEG_CFG_FD             0x00002000
5134 #define ANEG_CFG_INVAL          0x00001f06
5135
5136 };
5137 #define ANEG_OK         0
5138 #define ANEG_DONE       1
5139 #define ANEG_TIMER_ENAB 2
5140 #define ANEG_FAILED     -1
5141
5142 #define ANEG_STATE_SETTLE_TIME  10000
5143
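/* Step the software 1000BASE-X autoneg arbitration state machine,
 * apparently modelled on IEEE 802.3 clause 37: ability detect, ack
 * detect, complete-ack and idle detect, driven by the config words the
 * MAC receives in MAC_RX_AUTO_NEG.  Called once per tick from
 * fiber_autoneg(); returns ANEG_OK/ANEG_TIMER_ENAB while running and
 * ANEG_DONE or ANEG_FAILED when finished.
 */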
5144 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5145                                    struct tg3_fiber_aneginfo *ap)
5146 {
5147         u16 flowctrl;
5148         unsigned long delta;
5149         u32 rx_cfg_reg;
5150         int ret;
5151
5152         if (ap->state == ANEG_STATE_UNKNOWN) {
5153                 ap->rxconfig = 0;
5154                 ap->link_time = 0;
5155                 ap->cur_time = 0;
5156                 ap->ability_match_cfg = 0;
5157                 ap->ability_match_count = 0;
5158                 ap->ability_match = 0;
5159                 ap->idle_match = 0;
5160                 ap->ack_match = 0;
5161         }
5162         ap->cur_time++;
5163
5164         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5165                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5166
5167                 if (rx_cfg_reg != ap->ability_match_cfg) {
5168                         ap->ability_match_cfg = rx_cfg_reg;
5169                         ap->ability_match = 0;
5170                         ap->ability_match_count = 0;
5171                 } else {
5172                         if (++ap->ability_match_count > 1) {
5173                                 ap->ability_match = 1;
5174                                 ap->ability_match_cfg = rx_cfg_reg;
5175                         }
5176                 }
5177                 if (rx_cfg_reg & ANEG_CFG_ACK)
5178                         ap->ack_match = 1;
5179                 else
5180                         ap->ack_match = 0;
5181
5182                 ap->idle_match = 0;
5183         } else {
5184                 ap->idle_match = 1;
5185                 ap->ability_match_cfg = 0;
5186                 ap->ability_match_count = 0;
5187                 ap->ability_match = 0;
5188                 ap->ack_match = 0;
5189
5190                 rx_cfg_reg = 0;
5191         }
5192
5193         ap->rxconfig = rx_cfg_reg;
5194         ret = ANEG_OK;
5195
5196         switch (ap->state) {
5197         case ANEG_STATE_UNKNOWN:
5198                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5199                         ap->state = ANEG_STATE_AN_ENABLE;
5200
5201                 /* fallthru */
5202         case ANEG_STATE_AN_ENABLE:
5203                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5204                 if (ap->flags & MR_AN_ENABLE) {
5205                         ap->link_time = 0;
5206                         ap->cur_time = 0;
5207                         ap->ability_match_cfg = 0;
5208                         ap->ability_match_count = 0;
5209                         ap->ability_match = 0;
5210                         ap->idle_match = 0;
5211                         ap->ack_match = 0;
5212
5213                         ap->state = ANEG_STATE_RESTART_INIT;
5214                 } else {
5215                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5216                 }
5217                 break;
5218
5219         case ANEG_STATE_RESTART_INIT:
5220                 ap->link_time = ap->cur_time;
5221                 ap->flags &= ~(MR_NP_LOADED);
5222                 ap->txconfig = 0;
5223                 tw32(MAC_TX_AUTO_NEG, 0);
5224                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5225                 tw32_f(MAC_MODE, tp->mac_mode);
5226                 udelay(40);
5227
5228                 ret = ANEG_TIMER_ENAB;
5229                 ap->state = ANEG_STATE_RESTART;
5230
5231                 /* fallthru */
5232         case ANEG_STATE_RESTART:
5233                 delta = ap->cur_time - ap->link_time;
5234                 if (delta > ANEG_STATE_SETTLE_TIME)
5235                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5236                 else
5237                         ret = ANEG_TIMER_ENAB;
5238                 break;
5239
5240         case ANEG_STATE_DISABLE_LINK_OK:
5241                 ret = ANEG_DONE;
5242                 break;
5243
5244         case ANEG_STATE_ABILITY_DETECT_INIT:
5245                 ap->flags &= ~(MR_TOGGLE_TX);
5246                 ap->txconfig = ANEG_CFG_FD;
5247                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5248                 if (flowctrl & ADVERTISE_1000XPAUSE)
5249                         ap->txconfig |= ANEG_CFG_PS1;
5250                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5251                         ap->txconfig |= ANEG_CFG_PS2;
5252                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5253                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5254                 tw32_f(MAC_MODE, tp->mac_mode);
5255                 udelay(40);
5256
5257                 ap->state = ANEG_STATE_ABILITY_DETECT;
5258                 break;
5259
5260         case ANEG_STATE_ABILITY_DETECT:
5261                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5262                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5263                 break;
5264
5265         case ANEG_STATE_ACK_DETECT_INIT:
5266                 ap->txconfig |= ANEG_CFG_ACK;
5267                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5268                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5269                 tw32_f(MAC_MODE, tp->mac_mode);
5270                 udelay(40);
5271
5272                 ap->state = ANEG_STATE_ACK_DETECT;
5273
5274                 /* fallthru */
5275         case ANEG_STATE_ACK_DETECT:
5276                 if (ap->ack_match != 0) {
5277                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5278                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5279                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5280                         } else {
5281                                 ap->state = ANEG_STATE_AN_ENABLE;
5282                         }
5283                 } else if (ap->ability_match != 0 &&
5284                            ap->rxconfig == 0) {
5285                         ap->state = ANEG_STATE_AN_ENABLE;
5286                 }
5287                 break;
5288
5289         case ANEG_STATE_COMPLETE_ACK_INIT:
5290                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5291                         ret = ANEG_FAILED;
5292                         break;
5293                 }
5294                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5295                                MR_LP_ADV_HALF_DUPLEX |
5296                                MR_LP_ADV_SYM_PAUSE |
5297                                MR_LP_ADV_ASYM_PAUSE |
5298                                MR_LP_ADV_REMOTE_FAULT1 |
5299                                MR_LP_ADV_REMOTE_FAULT2 |
5300                                MR_LP_ADV_NEXT_PAGE |
5301                                MR_TOGGLE_RX |
5302                                MR_NP_RX);
5303                 if (ap->rxconfig & ANEG_CFG_FD)
5304                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5305                 if (ap->rxconfig & ANEG_CFG_HD)
5306                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5307                 if (ap->rxconfig & ANEG_CFG_PS1)
5308                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5309                 if (ap->rxconfig & ANEG_CFG_PS2)
5310                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5311                 if (ap->rxconfig & ANEG_CFG_RF1)
5312                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5313                 if (ap->rxconfig & ANEG_CFG_RF2)
5314                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5315                 if (ap->rxconfig & ANEG_CFG_NP)
5316                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5317
5318                 ap->link_time = ap->cur_time;
5319
5320                 ap->flags ^= (MR_TOGGLE_TX);
5321                 if (ap->rxconfig & 0x0008)
5322                         ap->flags |= MR_TOGGLE_RX;
5323                 if (ap->rxconfig & ANEG_CFG_NP)
5324                         ap->flags |= MR_NP_RX;
5325                 ap->flags |= MR_PAGE_RX;
5326
5327                 ap->state = ANEG_STATE_COMPLETE_ACK;
5328                 ret = ANEG_TIMER_ENAB;
5329                 break;
5330
5331         case ANEG_STATE_COMPLETE_ACK:
5332                 if (ap->ability_match != 0 &&
5333                     ap->rxconfig == 0) {
5334                         ap->state = ANEG_STATE_AN_ENABLE;
5335                         break;
5336                 }
5337                 delta = ap->cur_time - ap->link_time;
5338                 if (delta > ANEG_STATE_SETTLE_TIME) {
5339                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5340                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5341                         } else {
5342                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5343                                     !(ap->flags & MR_NP_RX)) {
5344                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5345                                 } else {
5346                                         ret = ANEG_FAILED;
5347                                 }
5348                         }
5349                 }
5350                 break;
5351
5352         case ANEG_STATE_IDLE_DETECT_INIT:
5353                 ap->link_time = ap->cur_time;
5354                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5355                 tw32_f(MAC_MODE, tp->mac_mode);
5356                 udelay(40);
5357
5358                 ap->state = ANEG_STATE_IDLE_DETECT;
5359                 ret = ANEG_TIMER_ENAB;
5360                 break;
5361
5362         case ANEG_STATE_IDLE_DETECT:
5363                 if (ap->ability_match != 0 &&
5364                     ap->rxconfig == 0) {
5365                         ap->state = ANEG_STATE_AN_ENABLE;
5366                         break;
5367                 }
5368                 delta = ap->cur_time - ap->link_time;
5369                 if (delta > ANEG_STATE_SETTLE_TIME) {
5370                         /* XXX another gem from the Broadcom driver :( */
5371                         ap->state = ANEG_STATE_LINK_OK;
5372                 }
5373                 break;
5374
5375         case ANEG_STATE_LINK_OK:
5376                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5377                 ret = ANEG_DONE;
5378                 break;
5379
5380         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5381                 /* ??? unimplemented */
5382                 break;
5383
5384         case ANEG_STATE_NEXT_PAGE_WAIT:
5385                 /* ??? unimplemented */
5386                 break;
5387
5388         default:
5389                 ret = ANEG_FAILED;
5390                 break;
5391         }
5392
5393         return ret;
5394 }
5395
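/* Run software autonegotiation on a fiber link: force GMII port mode,
 * transmit config words, and step tg3_fiber_aneg_smachine() once per
 * microsecond for up to ~195 ms.  Returns nonzero when the machine
 * reports ANEG_DONE and any of the completion/full-duplex flags are
 * set; the final tx/rx config words are passed back through @txflags
 * and @rxflags.
 */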
5396 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5397 {
5398         int res = 0;
5399         struct tg3_fiber_aneginfo aninfo;
5400         int status = ANEG_FAILED;
5401         unsigned int tick;
5402         u32 tmp;
5403
5404         tw32_f(MAC_TX_AUTO_NEG, 0);
5405
5406         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5407         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5408         udelay(40);
5409
5410         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5411         udelay(40);
5412
5413         memset(&aninfo, 0, sizeof(aninfo));
5414         aninfo.flags |= MR_AN_ENABLE;
5415         aninfo.state = ANEG_STATE_UNKNOWN;
5416         aninfo.cur_time = 0;
5417         tick = 0;
5418         while (++tick < 195000) {
5419                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5420                 if (status == ANEG_DONE || status == ANEG_FAILED)
5421                         break;
5422
5423                 udelay(1);
5424         }
5425
5426         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5427         tw32_f(MAC_MODE, tp->mac_mode);
5428         udelay(40);
5429
5430         *txflags = aninfo.txconfig;
5431         *rxflags = aninfo.flags;
5432
5433         if (status == ANEG_DONE &&
5434             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5435                              MR_LP_ADV_FULL_DUPLEX)))
5436                 res = 1;
5437
5438         return res;
5439 }
5440
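/* Bring up the BCM8002 SerDes PHY.  The register writes below are
 * presumably vendor-provided magic; only the intent noted in the
 * inline comments is documented.
 */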
5441 static void tg3_init_bcm8002(struct tg3 *tp)
5442 {
5443         u32 mac_status = tr32(MAC_STATUS);
5444         int i;
5445
5446         /* Reset when initializing for the first time or when we have a link. */
5447         if (tg3_flag(tp, INIT_COMPLETE) &&
5448             !(mac_status & MAC_STATUS_PCS_SYNCED))
5449                 return;
5450
5451         /* Set PLL lock range. */
5452         tg3_writephy(tp, 0x16, 0x8007);
5453
5454         /* SW reset */
5455         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5456
5457         /* Wait for reset to complete. */
5458         /* XXX schedule_timeout() ... */
5459         for (i = 0; i < 500; i++)
5460                 udelay(10);
5461
5462         /* Config mode; select PMA/Ch 1 regs. */
5463         tg3_writephy(tp, 0x10, 0x8411);
5464
5465         /* Enable auto-lock and comdet, select txclk for tx. */
5466         tg3_writephy(tp, 0x11, 0x0a10);
5467
5468         tg3_writephy(tp, 0x18, 0x00a0);
5469         tg3_writephy(tp, 0x16, 0x41ff);
5470
5471         /* Assert and deassert POR. */
5472         tg3_writephy(tp, 0x13, 0x0400);
5473         udelay(40);
5474         tg3_writephy(tp, 0x13, 0x0000);
5475
5476         tg3_writephy(tp, 0x11, 0x0a50);
5477         udelay(40);
5478         tg3_writephy(tp, 0x11, 0x0a10);
5479
5480         /* Wait for signal to stabilize */
5481         /* XXX schedule_timeout() ... */
5482         for (i = 0; i < 15000; i++)
5483                 udelay(10);
5484
5485         /* Deselect the channel register so we can read the PHYID
5486          * later.
5487          */
5488         tg3_writephy(tp, 0x10, 0x8011);
5489 }
5490
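/* Drive fiber link setup through the SG_DIG (SerDes digital) hardware
 * autoneg block: program the expected control word, resolve flow
 * control from the negotiated pause bits, and fall back to parallel
 * detection when the partner sends no config words.  Everything except
 * 5704 A0/A1 also needs MAC_SERDES_CFG adjustments around SG_DIG
 * resets.
 */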
5491 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5492 {
5493         u16 flowctrl;
5494         bool current_link_up;
5495         u32 sg_dig_ctrl, sg_dig_status;
5496         u32 serdes_cfg, expected_sg_dig_ctrl;
5497         int workaround, port_a;
5498
5499         serdes_cfg = 0;
5500         expected_sg_dig_ctrl = 0;
5501         workaround = 0;
5502         port_a = 1;
5503         current_link_up = false;
5504
5505         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5506             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5507                 workaround = 1;
5508                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5509                         port_a = 0;
5510
5511                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5512                 /* preserve bits 20-23 for voltage regulator */
5513                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5514         }
5515
5516         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5517
5518         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5519                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5520                         if (workaround) {
5521                                 u32 val = serdes_cfg;
5522
5523                                 if (port_a)
5524                                         val |= 0xc010000;
5525                                 else
5526                                         val |= 0x4010000;
5527                                 tw32_f(MAC_SERDES_CFG, val);
5528                         }
5529
5530                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5531                 }
5532                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5533                         tg3_setup_flow_control(tp, 0, 0);
5534                         current_link_up = true;
5535                 }
5536                 goto out;
5537         }
5538
5539         /* Want auto-negotiation.  */
5540         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5541
5542         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5543         if (flowctrl & ADVERTISE_1000XPAUSE)
5544                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5545         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5546                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5547
5548         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5549                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5550                     tp->serdes_counter &&
5551                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5552                                     MAC_STATUS_RCVD_CFG)) ==
5553                      MAC_STATUS_PCS_SYNCED)) {
5554                         tp->serdes_counter--;
5555                         current_link_up = true;
5556                         goto out;
5557                 }
5558 restart_autoneg:
5559                 if (workaround)
5560                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5561                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5562                 udelay(5);
5563                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5564
5565                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5566                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5567         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5568                                  MAC_STATUS_SIGNAL_DET)) {
5569                 sg_dig_status = tr32(SG_DIG_STATUS);
5570                 mac_status = tr32(MAC_STATUS);
5571
5572                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5573                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5574                         u32 local_adv = 0, remote_adv = 0;
5575
5576                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5577                                 local_adv |= ADVERTISE_1000XPAUSE;
5578                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5579                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5580
5581                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5582                                 remote_adv |= LPA_1000XPAUSE;
5583                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5584                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5585
5586                         tp->link_config.rmt_adv =
5587                                            mii_adv_to_ethtool_adv_x(remote_adv);
5588
5589                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5590                         current_link_up = true;
5591                         tp->serdes_counter = 0;
5592                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5593                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5594                         if (tp->serdes_counter)
5595                                 tp->serdes_counter--;
5596                         else {
5597                                 if (workaround) {
5598                                         u32 val = serdes_cfg;
5599
5600                                         if (port_a)
5601                                                 val |= 0xc010000;
5602                                         else
5603                                                 val |= 0x4010000;
5604
5605                                         tw32_f(MAC_SERDES_CFG, val);
5606                                 }
5607
5608                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5609                                 udelay(40);
5610
5611                                 /* Link parallel detection: link is up only
5612                                  * if we have PCS_SYNC and are not receiving
5613                                  * config code words. */
5614                                 mac_status = tr32(MAC_STATUS);
5615                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5616                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5617                                         tg3_setup_flow_control(tp, 0, 0);
5618                                         current_link_up = true;
5619                                         tp->phy_flags |=
5620                                                 TG3_PHYFLG_PARALLEL_DETECT;
5621                                         tp->serdes_counter =
5622                                                 SERDES_PARALLEL_DET_TIMEOUT;
5623                                 } else
5624                                         goto restart_autoneg;
5625                         }
5626                 }
5627         } else {
5628                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5629                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5630         }
5631
5632 out:
5633         return current_link_up;
5634 }
5635
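/* Fiber link setup without the SG_DIG block: run the software autoneg
 * state machine when autonegotiation is enabled, otherwise force a
 * 1000 Mbit/s full-duplex link.
 */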
5636 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5637 {
5638         bool current_link_up = false;
5639
5640         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5641                 goto out;
5642
5643         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5644                 u32 txflags, rxflags;
5645                 int i;
5646
5647                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5648                         u32 local_adv = 0, remote_adv = 0;
5649
5650                         if (txflags & ANEG_CFG_PS1)
5651                                 local_adv |= ADVERTISE_1000XPAUSE;
5652                         if (txflags & ANEG_CFG_PS2)
5653                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5654
5655                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5656                                 remote_adv |= LPA_1000XPAUSE;
5657                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5658                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5659
5660                         tp->link_config.rmt_adv =
5661                                            mii_adv_to_ethtool_adv_x(remote_adv);
5662
5663                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5664
5665                         current_link_up = true;
5666                 }
5667                 for (i = 0; i < 30; i++) {
5668                         udelay(20);
5669                         tw32_f(MAC_STATUS,
5670                                (MAC_STATUS_SYNC_CHANGED |
5671                                 MAC_STATUS_CFG_CHANGED));
5672                         udelay(40);
5673                         if ((tr32(MAC_STATUS) &
5674                              (MAC_STATUS_SYNC_CHANGED |
5675                               MAC_STATUS_CFG_CHANGED)) == 0)
5676                                 break;
5677                 }
5678
5679                 mac_status = tr32(MAC_STATUS);
5680                 if (!current_link_up &&
5681                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5682                     !(mac_status & MAC_STATUS_RCVD_CFG))
5683                         current_link_up = true;
5684         } else {
5685                 tg3_setup_flow_control(tp, 0, 0);
5686
5687                 /* Forcing 1000FD link up. */
5688                 current_link_up = true;
5689
5690                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5691                 udelay(40);
5692
5693                 tw32_f(MAC_MODE, tp->mac_mode);
5694                 udelay(40);
5695         }
5696
5697 out:
5698         return current_link_up;
5699 }
5700
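/* Top-level link setup for TBI-mode fiber ports: short-circuit when a
 * non-HW-autoneg link is already up and healthy, select TBI port mode,
 * run hardware or software autoneg as appropriate, then wait for the
 * MAC status to settle and update the LEDs and link state.
 */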
5701 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5702 {
5703         u32 orig_pause_cfg;
5704         u16 orig_active_speed;
5705         u8 orig_active_duplex;
5706         u32 mac_status;
5707         bool current_link_up;
5708         int i;
5709
5710         orig_pause_cfg = tp->link_config.active_flowctrl;
5711         orig_active_speed = tp->link_config.active_speed;
5712         orig_active_duplex = tp->link_config.active_duplex;
5713
5714         if (!tg3_flag(tp, HW_AUTONEG) &&
5715             tp->link_up &&
5716             tg3_flag(tp, INIT_COMPLETE)) {
5717                 mac_status = tr32(MAC_STATUS);
5718                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5719                                MAC_STATUS_SIGNAL_DET |
5720                                MAC_STATUS_CFG_CHANGED |
5721                                MAC_STATUS_RCVD_CFG);
5722                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5723                                    MAC_STATUS_SIGNAL_DET)) {
5724                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5725                                             MAC_STATUS_CFG_CHANGED));
5726                         return 0;
5727                 }
5728         }
5729
5730         tw32_f(MAC_TX_AUTO_NEG, 0);
5731
5732         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5733         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5734         tw32_f(MAC_MODE, tp->mac_mode);
5735         udelay(40);
5736
5737         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5738                 tg3_init_bcm8002(tp);
5739
5740         /* Enable link change events even when polling the serdes. */
5741         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5742         udelay(40);
5743
5744         current_link_up = false;
5745         tp->link_config.rmt_adv = 0;
5746         mac_status = tr32(MAC_STATUS);
5747
5748         if (tg3_flag(tp, HW_AUTONEG))
5749                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5750         else
5751                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5752
5753         tp->napi[0].hw_status->status =
5754                 (SD_STATUS_UPDATED |
5755                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5756
5757         for (i = 0; i < 100; i++) {
5758                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5759                                     MAC_STATUS_CFG_CHANGED));
5760                 udelay(5);
5761                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5762                                          MAC_STATUS_CFG_CHANGED |
5763                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5764                         break;
5765         }
5766
5767         mac_status = tr32(MAC_STATUS);
5768         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5769                 current_link_up = false;
5770                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5771                     tp->serdes_counter == 0) {
5772                         tw32_f(MAC_MODE, (tp->mac_mode |
5773                                           MAC_MODE_SEND_CONFIGS));
5774                         udelay(1);
5775                         tw32_f(MAC_MODE, tp->mac_mode);
5776                 }
5777         }
5778
5779         if (current_link_up) {
5780                 tp->link_config.active_speed = SPEED_1000;
5781                 tp->link_config.active_duplex = DUPLEX_FULL;
5782                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5783                                     LED_CTRL_LNKLED_OVERRIDE |
5784                                     LED_CTRL_1000MBPS_ON));
5785         } else {
5786                 tp->link_config.active_speed = SPEED_UNKNOWN;
5787                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5788                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5789                                     LED_CTRL_LNKLED_OVERRIDE |
5790                                     LED_CTRL_TRAFFIC_OVERRIDE));
5791         }
5792
5793         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5794                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5795                 if (orig_pause_cfg != now_pause_cfg ||
5796                     orig_active_speed != tp->link_config.active_speed ||
5797                     orig_active_duplex != tp->link_config.active_duplex)
5798                         tg3_link_report(tp);
5799         }
5800
5801         return 0;
5802 }
5803
5804 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5805 {
5806         int err = 0;
5807         u32 bmsr, bmcr;
5808         u16 current_speed = SPEED_UNKNOWN;
5809         u8 current_duplex = DUPLEX_UNKNOWN;
5810         bool current_link_up = false;
5811         u32 local_adv, remote_adv, sgsr;
5812
5813         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5814              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5815              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5816              (sgsr & SERDES_TG3_SGMII_MODE)) {
5817
5818                 if (force_reset)
5819                         tg3_phy_reset(tp);
5820
5821                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5822
5823                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5824                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5825                 } else {
5826                         current_link_up = true;
5827                         if (sgsr & SERDES_TG3_SPEED_1000) {
5828                                 current_speed = SPEED_1000;
5829                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5830                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5831                                 current_speed = SPEED_100;
5832                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5833                         } else {
5834                                 current_speed = SPEED_10;
5835                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5836                         }
5837
5838                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5839                                 current_duplex = DUPLEX_FULL;
5840                         else
5841                                 current_duplex = DUPLEX_HALF;
5842                 }
5843
5844                 tw32_f(MAC_MODE, tp->mac_mode);
5845                 udelay(40);
5846
5847                 tg3_clear_mac_status(tp);
5848
5849                 goto fiber_setup_done;
5850         }
5851
5852         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5853         tw32_f(MAC_MODE, tp->mac_mode);
5854         udelay(40);
5855
5856         tg3_clear_mac_status(tp);
5857
5858         if (force_reset)
5859                 tg3_phy_reset(tp);
5860
5861         tp->link_config.rmt_adv = 0;
5862
5863         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5864         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5865         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5866                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5867                         bmsr |= BMSR_LSTATUS;
5868                 else
5869                         bmsr &= ~BMSR_LSTATUS;
5870         }
5871
5872         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5873
5874         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5875             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5876                 /* do nothing, just check for link up at the end */
5877         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5878                 u32 adv, newadv;
5879
5880                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5881                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5882                                  ADVERTISE_1000XPAUSE |
5883                                  ADVERTISE_1000XPSE_ASYM |
5884                                  ADVERTISE_SLCT);
5885
5886                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5887                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5888
5889                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5890                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5891                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5892                         tg3_writephy(tp, MII_BMCR, bmcr);
5893
5894                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5895                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5896                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5897
5898                         return err;
5899                 }
5900         } else {
5901                 u32 new_bmcr;
5902
5903                 bmcr &= ~BMCR_SPEED1000;
5904                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5905
5906                 if (tp->link_config.duplex == DUPLEX_FULL)
5907                         new_bmcr |= BMCR_FULLDPLX;
5908
5909                 if (new_bmcr != bmcr) {
5910                         /* BMCR_SPEED1000 is a reserved bit that needs
5911                          * to be set on write.
5912                          */
5913                         new_bmcr |= BMCR_SPEED1000;
5914
5915                         /* Force a linkdown */
5916                         if (tp->link_up) {
5917                                 u32 adv;
5918
5919                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5920                                 adv &= ~(ADVERTISE_1000XFULL |
5921                                          ADVERTISE_1000XHALF |
5922                                          ADVERTISE_SLCT);
5923                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5924                                 tg3_writephy(tp, MII_BMCR, bmcr |
5925                                                            BMCR_ANRESTART |
5926                                                            BMCR_ANENABLE);
5927                                 udelay(10);
5928                                 tg3_carrier_off(tp);
5929                         }
5930                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5931                         bmcr = new_bmcr;
5932                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5933                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5934                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5935                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5936                                         bmsr |= BMSR_LSTATUS;
5937                                 else
5938                                         bmsr &= ~BMSR_LSTATUS;
5939                         }
5940                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5941                 }
5942         }
5943
5944         if (bmsr & BMSR_LSTATUS) {
5945                 current_speed = SPEED_1000;
5946                 current_link_up = true;
5947                 if (bmcr & BMCR_FULLDPLX)
5948                         current_duplex = DUPLEX_FULL;
5949                 else
5950                         current_duplex = DUPLEX_HALF;
5951
5952                 local_adv = 0;
5953                 remote_adv = 0;
5954
5955                 if (bmcr & BMCR_ANENABLE) {
5956                         u32 common;
5957
5958                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5959                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5960                         common = local_adv & remote_adv;
5961                         if (common & (ADVERTISE_1000XHALF |
5962                                       ADVERTISE_1000XFULL)) {
5963                                 if (common & ADVERTISE_1000XFULL)
5964                                         current_duplex = DUPLEX_FULL;
5965                                 else
5966                                         current_duplex = DUPLEX_HALF;
5967
5968                                 tp->link_config.rmt_adv =
5969                                            mii_adv_to_ethtool_adv_x(remote_adv);
5970                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5971                                 /* Link is up via parallel detect */
5972                         } else {
5973                                 current_link_up = false;
5974                         }
5975                 }
5976         }
5977
5978 fiber_setup_done:
5979         if (current_link_up && current_duplex == DUPLEX_FULL)
5980                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5981
5982         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5983         if (tp->link_config.active_duplex == DUPLEX_HALF)
5984                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5985
5986         tw32_f(MAC_MODE, tp->mac_mode);
5987         udelay(40);
5988
5989         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5990
5991         tp->link_config.active_speed = current_speed;
5992         tp->link_config.active_duplex = current_duplex;
5993
5994         tg3_test_and_report_link_chg(tp, current_link_up);
5995         return err;
5996 }
5997
5998 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5999 {
6000         if (tp->serdes_counter) {
6001                 /* Give autoneg time to complete. */
6002                 tp->serdes_counter--;
6003                 return;
6004         }
6005
6006         if (!tp->link_up &&
6007             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6008                 u32 bmcr;
6009
6010                 tg3_readphy(tp, MII_BMCR, &bmcr);
6011                 if (bmcr & BMCR_ANENABLE) {
6012                         u32 phy1, phy2;
6013
6014                         /* Select shadow register 0x1f */
6015                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6016                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6017
6018                         /* Select expansion interrupt status register */
6019                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6020                                          MII_TG3_DSP_EXP1_INT_STAT);
6021                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6022                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6023
6024                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6025                                 /* We have signal detect and are not receiving
6026                                  * config code words, so the link is up by parallel
6027                                  * detection.
6028                                  */
6029
6030                                 bmcr &= ~BMCR_ANENABLE;
6031                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6032                                 tg3_writephy(tp, MII_BMCR, bmcr);
6033                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6034                         }
6035                 }
6036         } else if (tp->link_up &&
6037                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6038                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6039                 u32 phy2;
6040
6041                 /* Select expansion interrupt status register */
6042                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6043                                  MII_TG3_DSP_EXP1_INT_STAT);
6044                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6045                 if (phy2 & 0x20) {
6046                         u32 bmcr;
6047
6048                         /* Config code words received, turn on autoneg. */
6049                         tg3_readphy(tp, MII_BMCR, &bmcr);
6050                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6051
6052                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6053
6054                 }
6055         }
6056 }
6057
6058 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6059 {
6060         u32 val;
6061         int err;
6062
6063         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6064                 err = tg3_setup_fiber_phy(tp, force_reset);
6065         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6066                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6067         else
6068                 err = tg3_setup_copper_phy(tp, force_reset);
6069
6070         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6071                 u32 scale;
6072
6073                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6074                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6075                         scale = 65;
6076                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6077                         scale = 6;
6078                 else
6079                         scale = 12;
6080
6081                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6082                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6083                 tw32(GRC_MISC_CFG, val);
6084         }
6085
6086         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6087               (6 << TX_LENGTHS_IPG_SHIFT);
6088         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6089             tg3_asic_rev(tp) == ASIC_REV_5762)
6090                 val |= tr32(MAC_TX_LENGTHS) &
6091                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6092                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6093
6094         if (tp->link_config.active_speed == SPEED_1000 &&
6095             tp->link_config.active_duplex == DUPLEX_HALF)
6096                 tw32(MAC_TX_LENGTHS, val |
6097                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6098         else
6099                 tw32(MAC_TX_LENGTHS, val |
6100                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6101
6102         if (!tg3_flag(tp, 5705_PLUS)) {
6103                 if (tp->link_up) {
6104                         tw32(HOSTCC_STAT_COAL_TICKS,
6105                              tp->coal.stats_block_coalesce_usecs);
6106                 } else {
6107                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6108                 }
6109         }
6110
6111         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6112                 val = tr32(PCIE_PWR_MGMT_THRESH);
6113                 if (!tp->link_up)
6114                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6115                               tp->pwrmgmt_thresh;
6116                 else
6117                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6118                 tw32(PCIE_PWR_MGMT_THRESH, val);
6119         }
6120
6121         return err;
6122 }
6123
6124 /* tp->lock must be held */
6125 static u64 tg3_refclk_read(struct tg3 *tp)
6126 {
6127         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6128         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6129 }
6130
6131 /* tp->lock must be held */
6132 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6133 {
6134         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6135
6136         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6137         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6138         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6139         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6140 }
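
/* The clock is stopped around the two 32-bit writes above; a running
 * counter could otherwise advance between the LSB and MSB updates and
 * be resumed holding a torn 64-bit value.
 */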
6141
6142 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6143 static inline void tg3_full_unlock(struct tg3 *tp);
6144 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6145 {
6146         struct tg3 *tp = netdev_priv(dev);
6147
6148         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6149                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6150                                 SOF_TIMESTAMPING_SOFTWARE;
6151
6152         if (tg3_flag(tp, PTP_CAPABLE)) {
6153                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6154                                         SOF_TIMESTAMPING_RX_HARDWARE |
6155                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6156         }
6157
6158         if (tp->ptp_clock)
6159                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6160         else
6161                 info->phc_index = -1;
6162
6163         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6164
6165         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6166                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6167                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6168                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6169         return 0;
6170 }
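
/* A phc_index of -1 tells user space (e.g. ethtool -T) that no PTP
 * hardware clock is exposed, while software timestamping remains
 * available.
 */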
6171
6172 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6173 {
6174         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6175         bool neg_adj = false;
6176         u32 correction = 0;
6177
6178         if (ppb < 0) {
6179                 neg_adj = true;
6180                 ppb = -ppb;
6181         }
6182
6183         /* Frequency adjustment is performed using hardware with a 24 bit
6184          * accumulator and a programmable correction value. On each clk, the
6185          * correction value gets added to the accumulator and when it
6186          * overflows, the time counter is incremented/decremented.
6187          *
6188          * So the conversion from ppb to the correction value is
6189          *              ppb * (1 << 24) / 1000000000
6190          */
6191         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6192                      TG3_EAV_REF_CLK_CORRECT_MASK;
6193
6194         tg3_full_lock(tp, 0);
6195
6196         if (correction)
6197                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6198                      TG3_EAV_REF_CLK_CORRECT_EN |
6199                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6200         else
6201                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6202
6203         tg3_full_unlock(tp);
6204
6205         return 0;
6206 }
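
/* Worked example of the conversion above: a request of ppb = 1000
 * (one part per million) gives
 *
 *	correction = 1000 * (1 << 24) / 1000000000 = 16 (truncated)
 *
 * so 16 is added to the 24-bit accumulator on every clock, and the
 * accumulator overflows about once every (1 << 24) / 16 = 1048576
 * clocks, i.e. roughly one part per million as requested.
 */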
6207
6208 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6209 {
6210         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6211
6212         tg3_full_lock(tp, 0);
6213         tp->ptp_adjust += delta;
6214         tg3_full_unlock(tp);
6215
6216         return 0;
6217 }
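
/* Note that adjtime never touches the hardware counter; the delta is
 * accumulated in tp->ptp_adjust and applied on the read side, in
 * tg3_ptp_gettime() below and in tg3_hwclock_to_timestamp().
 */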
6218
6219 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6220 {
6221         u64 ns;
6222         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6223
6224         tg3_full_lock(tp, 0);
6225         ns = tg3_refclk_read(tp);
6226         ns += tp->ptp_adjust;
6227         tg3_full_unlock(tp);
6228
6229         *ts = ns_to_timespec64(ns);
6230
6231         return 0;
6232 }
6233
6234 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6235                            const struct timespec64 *ts)
6236 {
6237         u64 ns;
6238         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6239
6240         ns = timespec64_to_ns(ts);
6241
6242         tg3_full_lock(tp, 0);
6243         tg3_refclk_write(tp, ns);
6244         tp->ptp_adjust = 0;
6245         tg3_full_unlock(tp);
6246
6247         return 0;
6248 }
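
/* Unlike adjtime, settime rewrites the hardware counter itself and
 * clears the software ptp_adjust offset, so subsequent reads start
 * from the new absolute time.
 */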
6249
6250 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6251                           struct ptp_clock_request *rq, int on)
6252 {
6253         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6254         u32 clock_ctl;
6255         int rval = 0;
6256
6257         switch (rq->type) {
6258         case PTP_CLK_REQ_PEROUT:
6259                 if (rq->perout.index != 0)
6260                         return -EINVAL;
6261
6262                 tg3_full_lock(tp, 0);
6263                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6264                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6265
6266                 if (on) {
6267                         u64 nsec;
6268
6269                         nsec = rq->perout.start.sec * 1000000000ULL +
6270                                rq->perout.start.nsec;
6271
6272                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6273                                 netdev_warn(tp->dev,
6274                                             "Device supports only a one-shot timesync output, period must be 0\n");
6275                                 rval = -EINVAL;
6276                                 goto err_out;
6277                         }
6278
6279                         if (nsec & (1ULL << 63)) {
6280                                 netdev_warn(tp->dev,
6281                                             "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6282                                 rval = -EINVAL;
6283                                 goto err_out;
6284                         }
6285
6286                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6287                         tw32(TG3_EAV_WATCHDOG0_MSB,
6288                              TG3_EAV_WATCHDOG0_EN |
6289                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6290
6291                         tw32(TG3_EAV_REF_CLCK_CTL,
6292                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6293                 } else {
6294                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6295                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6296                 }
6297
6298 err_out:
6299                 tg3_full_unlock(tp);
6300                 return rval;
6301
6302         default:
6303                 break;
6304         }
6305
6306         return -EOPNOTSUPP;
6307 }
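
/* Only PTP_CLK_REQ_PEROUT on index 0 is supported, and only as a
 * one-shot event on watchdog 0: the requested period must be zero
 * and the start time must fit in 63 bits, as checked above.
 */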
6308
6309 static const struct ptp_clock_info tg3_ptp_caps = {
6310         .owner          = THIS_MODULE,
6311         .name           = "tg3 clock",
6312         .max_adj        = 250000000,
6313         .n_alarm        = 0,
6314         .n_ext_ts       = 0,
6315         .n_per_out      = 1,
6316         .n_pins         = 0,
6317         .pps            = 0,
6318         .adjfreq        = tg3_ptp_adjfreq,
6319         .adjtime        = tg3_ptp_adjtime,
6320         .gettime64      = tg3_ptp_gettime,
6321         .settime64      = tg3_ptp_settime,
6322         .enable         = tg3_ptp_enable,
6323 };
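
/* These capabilities are copied into tp->ptp_info by tg3_ptp_init()
 * below and handed to ptp_clock_register() elsewhere in the driver.
 * max_adj is expressed in parts per billion, so the clock may be
 * slewed by at most 25%.
 */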
6324
6325 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6326                                      struct skb_shared_hwtstamps *timestamp)
6327 {
6328         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6329         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6330                                            tp->ptp_adjust);
6331 }
6332
6333 /* tp->lock must be held */
6334 static void tg3_ptp_init(struct tg3 *tp)
6335 {
6336         if (!tg3_flag(tp, PTP_CAPABLE))
6337                 return;
6338
6339         /* Initialize the hardware clock to the system time. */
6340         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6341         tp->ptp_adjust = 0;
6342         tp->ptp_info = tg3_ptp_caps;
6343 }
6344
6345 /* tp->lock must be held */
6346 static void tg3_ptp_resume(struct tg3 *tp)
6347 {
6348         if (!tg3_flag(tp, PTP_CAPABLE))
6349                 return;
6350
6351         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6352         tp->ptp_adjust = 0;
6353 }
6354
6355 static void tg3_ptp_fini(struct tg3 *tp)
6356 {
6357         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6358                 return;
6359
6360         ptp_clock_unregister(tp->ptp_clock);
6361         tp->ptp_clock = NULL;
6362         tp->ptp_adjust = 0;
6363 }
6364
6365 static inline int tg3_irq_sync(struct tg3 *tp)
6366 {
6367         return tp->irq_sync;
6368 }
6369
6370 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6371 {
6372         int i;
6373
6374         dst = (u32 *)((u8 *)dst + off);
6375         for (i = 0; i < len; i += sizeof(u32))
6376                 *dst++ = tr32(off + i);
6377 }
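
/* Note that dst is advanced by the register offset before the copy,
 * so a register read from off + i lands at byte offset off + i of
 * the caller's buffer.  tg3_dump_legacy_regs() below relies on this
 * to build a sparse image of the register block within a single
 * regs[] allocation.
 */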
6378
6379 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6380 {
6381         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6382         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6383         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6384         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6385         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6386         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6387         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6388         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6389         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6390         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6391         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6392         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6393         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6394         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6395         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6396         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6397         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6398         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6399         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6400
6401         if (tg3_flag(tp, SUPPORT_MSIX))
6402                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6403
6404         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6405         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6406         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6407         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6408         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6409         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6410         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6411         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6412
6413         if (!tg3_flag(tp, 5705_PLUS)) {
6414                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6415                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6416                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6417         }
6418
6419         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6420         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6421         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6422         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6423         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6424
6425         if (tg3_flag(tp, NVRAM))
6426                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6427 }
6428
6429 static void tg3_dump_state(struct tg3 *tp)
6430 {
6431         int i;
6432         u32 *regs;
6433
6434         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6435         if (!regs)
6436                 return;
6437
6438         if (tg3_flag(tp, PCI_EXPRESS)) {
6439                 /* Read up to but not including private PCI registers */
6440                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6441                         regs[i / sizeof(u32)] = tr32(i);
6442         } else
6443                 tg3_dump_legacy_regs(tp, regs);
6444
6445         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6446                 if (!regs[i + 0] && !regs[i + 1] &&
6447                     !regs[i + 2] && !regs[i + 3])
6448                         continue;
6449
6450                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6451                            i * 4,
6452                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6453         }
6454
6455         kfree(regs);
6456
6457         for (i = 0; i < tp->irq_cnt; i++) {
6458                 struct tg3_napi *tnapi = &tp->napi[i];
6459
6460                 /* SW status block */
6461                 netdev_err(tp->dev,
6462                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6463                            i,
6464                            tnapi->hw_status->status,
6465                            tnapi->hw_status->status_tag,
6466                            tnapi->hw_status->rx_jumbo_consumer,
6467                            tnapi->hw_status->rx_consumer,
6468                            tnapi->hw_status->rx_mini_consumer,
6469                            tnapi->hw_status->idx[0].rx_producer,
6470                            tnapi->hw_status->idx[0].tx_consumer);
6471
6472                 netdev_err(tp->dev,
6473                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6474                            i,
6475                            tnapi->last_tag, tnapi->last_irq_tag,
6476                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6477                            tnapi->rx_rcb_ptr,
6478                            tnapi->prodring.rx_std_prod_idx,
6479                            tnapi->prodring.rx_std_cons_idx,
6480                            tnapi->prodring.rx_jmb_prod_idx,
6481                            tnapi->prodring.rx_jmb_cons_idx);
6482         }
6483 }
6484
6485 /* This is called whenever we suspect that the system chipset is re-
6486  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6487  * is bogus tx completions. We try to recover by setting the
6488  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6489  * in the workqueue.
6490  */
6491 static void tg3_tx_recover(struct tg3 *tp)
6492 {
6493         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6494                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6495
6496         netdev_warn(tp->dev,
6497                     "The system may be re-ordering memory-mapped I/O "
6498                     "cycles to the network device, attempting to recover. "
6499                     "Please report the problem to the driver maintainer "
6500                     "and include system chipset information.\n");
6501
6502         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6503 }
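
/* The BUG_ON() above documents that recovery only makes sense while
 * the reordering workaround is inactive; if bogus completions were
 * seen with MBOX_WRITE_REORDER already set and indirect mailbox
 * writes in use, there would be no further fallback.
 */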
6504
6505 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6506 {
6507         /* Tell compiler to fetch tx indices from memory. */
6508         barrier();
6509         return tnapi->tx_pending -
6510                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6511 }
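
/* Example of the wraparound arithmetic above, assuming the usual
 * 512-entry TX ring (TG3_TX_RING_SIZE == 512): with tx_prod == 5 and
 * tx_cons == 500, (5 - 500) & 511 == 17 descriptors are in flight,
 * leaving tx_pending - 17 available.  The masking is valid because
 * the ring size is a power of two.
 */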
6512
6513 /* Tigon3 never reports partial packet sends.  So we do not
6514  * need special logic to handle SKBs that have not had all
6515  * of their frags sent yet, like SunGEM does.
6516  */
6517 static void tg3_tx(struct tg3_napi *tnapi)
6518 {
6519         struct tg3 *tp = tnapi->tp;
6520         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6521         u32 sw_idx = tnapi->tx_cons;
6522         struct netdev_queue *txq;
6523         int index = tnapi - tp->napi;
6524         unsigned int pkts_compl = 0, bytes_compl = 0;
6525
6526         if (tg3_flag(tp, ENABLE_TSS))
6527                 index--;
6528
6529         txq = netdev_get_tx_queue(tp->dev, index);
6530
6531         while (sw_idx != hw_idx) {
6532                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6533                 struct sk_buff *skb = ri->skb;
6534                 int i, tx_bug = 0;
6535
6536                 if (unlikely(skb == NULL)) {
6537                         tg3_tx_recover(tp);
6538                         return;
6539                 }
6540
6541                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6542                         struct skb_shared_hwtstamps timestamp;
6543                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6544                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6545
6546                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6547
6548                         skb_tstamp_tx(skb, &timestamp);
6549                 }
6550
6551                 pci_unmap_single(tp->pdev,
6552                                  dma_unmap_addr(ri, mapping),
6553                                  skb_headlen(skb),
6554                                  PCI_DMA_TODEVICE);
6555
6556                 ri->skb = NULL;
6557
6558                 while (ri->fragmented) {
6559                         ri->fragmented = false;
6560                         sw_idx = NEXT_TX(sw_idx);
6561                         ri = &tnapi->tx_buffers[sw_idx];
6562                 }
6563
6564                 sw_idx = NEXT_TX(sw_idx);
6565
6566                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6567                         ri = &tnapi->tx_buffers[sw_idx];
6568                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6569                                 tx_bug = 1;
6570
6571                         pci_unmap_page(tp->pdev,
6572                                        dma_unmap_addr(ri, mapping),
6573                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6574                                        PCI_DMA_TODEVICE);
6575
6576                         while (ri->fragmented) {
6577                                 ri->fragmented = false;
6578                                 sw_idx = NEXT_TX(sw_idx);
6579                                 ri = &tnapi->tx_buffers[sw_idx];
6580                         }
6581
6582                         sw_idx = NEXT_TX(sw_idx);
6583                 }
6584
6585                 pkts_compl++;
6586                 bytes_compl += skb->len;
6587
6588                 dev_kfree_skb_any(skb);
6589
6590                 if (unlikely(tx_bug)) {
6591                         tg3_tx_recover(tp);
6592                         return;
6593                 }
6594         }
6595
6596         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6597
6598         tnapi->tx_cons = sw_idx;
6599
6600         /* Need to make the tx_cons update visible to tg3_start_xmit()
6601          * before checking for netif_queue_stopped().  Without the
6602          * memory barrier, there is a small possibility that tg3_start_xmit()
6603          * will miss it and cause the queue to be stopped forever.
6604          */
6605         smp_mb();
6606
6607         if (unlikely(netif_tx_queue_stopped(txq) &&
6608                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6609                 __netif_tx_lock(txq, smp_processor_id());
6610                 if (netif_tx_queue_stopped(txq) &&
6611                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6612                         netif_tx_wake_queue(txq);
6613                 __netif_tx_unlock(txq);
6614         }
6615 }
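
/* The stopped-queue test above is intentionally done twice: the
 * first, lockless check avoids taking the tx queue lock in the
 * common case, while the recheck under __netif_tx_lock() closes the
 * race with a concurrent tg3_start_xmit() stopping the queue.
 */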
6616
6617 static void tg3_frag_free(bool is_frag, void *data)
6618 {
6619         if (is_frag)
6620                 skb_free_frag(data);
6621         else
6622                 kfree(data);
6623 }
6624
6625 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6626 {
6627         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6628                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6629
6630         if (!ri->data)
6631                 return;
6632
6633         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6634                          map_sz, PCI_DMA_FROMDEVICE);
6635         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6636         ri->data = NULL;
6637 }
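
/* The skb_size computation above repeats the one in
 * tg3_alloc_rx_data() below, so the page-fragment vs. kmalloc()
 * decision made at allocation time is reproduced exactly when the
 * buffer is freed.
 */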
6638
6639
6640 /* Returns size of the rx data buffer allocated or < 0 on error.
6641  *
6642  * We only need to fill in the address because the other members
6643  * of the RX descriptor are invariant; see tg3_init_rings.
6644  *
6645  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6646  * posting buffers we only dirty the first cache line of the RX
6647  * descriptor (containing the address).  Whereas for the RX status
6648  * buffers the cpu only reads the last cacheline of the RX descriptor
6649  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6650  */
6651 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6652                              u32 opaque_key, u32 dest_idx_unmasked,
6653                              unsigned int *frag_size)
6654 {
6655         struct tg3_rx_buffer_desc *desc;
6656         struct ring_info *map;
6657         u8 *data;
6658         dma_addr_t mapping;
6659         int skb_size, data_size, dest_idx;
6660
6661         switch (opaque_key) {
6662         case RXD_OPAQUE_RING_STD:
6663                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6664                 desc = &tpr->rx_std[dest_idx];
6665                 map = &tpr->rx_std_buffers[dest_idx];
6666                 data_size = tp->rx_pkt_map_sz;
6667                 break;
6668
6669         case RXD_OPAQUE_RING_JUMBO:
6670                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6671                 desc = &tpr->rx_jmb[dest_idx].std;
6672                 map = &tpr->rx_jmb_buffers[dest_idx];
6673                 data_size = TG3_RX_JMB_MAP_SZ;
6674                 break;
6675
6676         default:
6677                 return -EINVAL;
6678         }
6679
6680         /* Do not overwrite any of the map or rp information
6681          * until we are sure we can commit to a new buffer.
6682          *
6683          * Callers depend upon this behavior and assume that
6684          * we leave everything unchanged if we fail.
6685          */
6686         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6687                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6688         if (skb_size <= PAGE_SIZE) {
6689                 data = netdev_alloc_frag(skb_size);
6690                 *frag_size = skb_size;
6691         } else {
6692                 data = kmalloc(skb_size, GFP_ATOMIC);
6693                 *frag_size = 0;
6694         }
6695         if (!data)
6696                 return -ENOMEM;
6697
6698         mapping = pci_map_single(tp->pdev,
6699                                  data + TG3_RX_OFFSET(tp),
6700                                  data_size,
6701                                  PCI_DMA_FROMDEVICE);
6702         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6703                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6704                 return -EIO;
6705         }
6706
6707         map->data = data;
6708         dma_unmap_addr_set(map, mapping, mapping);
6709
6710         desc->addr_hi = ((u64)mapping >> 32);
6711         desc->addr_lo = ((u64)mapping & 0xffffffff);
6712
6713         return data_size;
6714 }
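
/* Buffers whose total footprint (data plus struct skb_shared_info)
 * fits in one page are carved from the page-fragment allocator;
 * larger ones fall back to kmalloc().  *frag_size records which
 * allocator was used (0 for kmalloc) and is later handed to
 * build_skb() and tg3_frag_free() by tg3_rx().
 */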
6715
6716 /* We only need to move the address over because the other
6717  * members of the RX descriptor are invariant.  See notes above
6718  * tg3_alloc_rx_data for full details.
6719  */
6720 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6721                            struct tg3_rx_prodring_set *dpr,
6722                            u32 opaque_key, int src_idx,
6723                            u32 dest_idx_unmasked)
6724 {
6725         struct tg3 *tp = tnapi->tp;
6726         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6727         struct ring_info *src_map, *dest_map;
6728         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6729         int dest_idx;
6730
6731         switch (opaque_key) {
6732         case RXD_OPAQUE_RING_STD:
6733                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6734                 dest_desc = &dpr->rx_std[dest_idx];
6735                 dest_map = &dpr->rx_std_buffers[dest_idx];
6736                 src_desc = &spr->rx_std[src_idx];
6737                 src_map = &spr->rx_std_buffers[src_idx];
6738                 break;
6739
6740         case RXD_OPAQUE_RING_JUMBO:
6741                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6742                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6743                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6744                 src_desc = &spr->rx_jmb[src_idx].std;
6745                 src_map = &spr->rx_jmb_buffers[src_idx];
6746                 break;
6747
6748         default:
6749                 return;
6750         }
6751
6752         dest_map->data = src_map->data;
6753         dma_unmap_addr_set(dest_map, mapping,
6754                            dma_unmap_addr(src_map, mapping));
6755         dest_desc->addr_hi = src_desc->addr_hi;
6756         dest_desc->addr_lo = src_desc->addr_lo;
6757
6758         /* Ensure that the update to the data pointer happens after the physical
6759          * addresses have been transferred to the new BD location.
6760          */
6761         smp_wmb();
6762
6763         src_map->data = NULL;
6764 }
6765
6766 /* The RX ring scheme is composed of multiple rings which post fresh
6767  * buffers to the chip, and one special ring the chip uses to report
6768  * status back to the host.
6769  *
6770  * The special ring reports the status of received packets to the
6771  * host.  The chip does not write into the original descriptor the
6772  * RX buffer was obtained from.  The chip simply takes the original
6773  * descriptor as provided by the host, updates the status and length
6774  * field, then writes this into the next status ring entry.
6775  *
6776  * Each ring the host uses to post buffers to the chip is described
6777  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6778  * it is first placed into the on-chip ram.  When the packet's length
6779  * is known, it walks down the TG3_BDINFO entries to select the ring.
6780  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6781  * whose MAXLEN covers the new packet's length is chosen.
6782  *
6783  * The "separate ring for rx status" scheme may sound queer, but it makes
6784  * sense from a cache coherency perspective.  If only the host writes
6785  * to the buffer post rings, and only the chip writes to the rx status
6786  * rings, then cache lines never move beyond shared-modified state.
6787  * If both the host and chip were to write into the same ring, cache line
6788  * eviction could occur since both entities want it in an exclusive state.
6789  */
6790 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6791 {
6792         struct tg3 *tp = tnapi->tp;
6793         u32 work_mask, rx_std_posted = 0;
6794         u32 std_prod_idx, jmb_prod_idx;
6795         u32 sw_idx = tnapi->rx_rcb_ptr;
6796         u16 hw_idx;
6797         int received;
6798         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6799
6800         hw_idx = *(tnapi->rx_rcb_prod_idx);
6801         /*
6802          * We need to order the read of hw_idx and the read of
6803          * the opaque cookie.
6804          */
6805         rmb();
6806         work_mask = 0;
6807         received = 0;
6808         std_prod_idx = tpr->rx_std_prod_idx;
6809         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6810         while (sw_idx != hw_idx && budget > 0) {
6811                 struct ring_info *ri;
6812                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6813                 unsigned int len;
6814                 struct sk_buff *skb;
6815                 dma_addr_t dma_addr;
6816                 u32 opaque_key, desc_idx, *post_ptr;
6817                 u8 *data;
6818                 u64 tstamp = 0;
6819
6820                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6821                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6822                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6823                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6824                         dma_addr = dma_unmap_addr(ri, mapping);
6825                         data = ri->data;
6826                         post_ptr = &std_prod_idx;
6827                         rx_std_posted++;
6828                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6829                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6830                         dma_addr = dma_unmap_addr(ri, mapping);
6831                         data = ri->data;
6832                         post_ptr = &jmb_prod_idx;
6833                 } else
6834                         goto next_pkt_nopost;
6835
6836                 work_mask |= opaque_key;
6837
6838                 if (desc->err_vlan & RXD_ERR_MASK) {
6839                 drop_it:
6840                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6841                                        desc_idx, *post_ptr);
6842                 drop_it_no_recycle:
6843                         /* Other statistics are kept track of by the card. */
6844                         tp->rx_dropped++;
6845                         goto next_pkt;
6846                 }
6847
6848                 prefetch(data + TG3_RX_OFFSET(tp));
6849                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6850                       ETH_FCS_LEN;
6851
6852                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6853                      RXD_FLAG_PTPSTAT_PTPV1 ||
6854                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6855                      RXD_FLAG_PTPSTAT_PTPV2) {
6856                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6857                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6858                 }
6859
6860                 if (len > TG3_RX_COPY_THRESH(tp)) {
6861                         int skb_size;
6862                         unsigned int frag_size;
6863
6864                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6865                                                     *post_ptr, &frag_size);
6866                         if (skb_size < 0)
6867                                 goto drop_it;
6868
6869                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6870                                          PCI_DMA_FROMDEVICE);
6871
6872                         /* Ensure that the update to the data happens
6873                          * after the usage of the old DMA mapping.
6874                          */
6875                         smp_wmb();
6876
6877                         ri->data = NULL;
6878
6879                         skb = build_skb(data, frag_size);
6880                         if (!skb) {
6881                                 tg3_frag_free(frag_size != 0, data);
6882                                 goto drop_it_no_recycle;
6883                         }
6884                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6885                 } else {
6886                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6887                                        desc_idx, *post_ptr);
6888
6889                         skb = netdev_alloc_skb(tp->dev,
6890                                                len + TG3_RAW_IP_ALIGN);
6891                         if (skb == NULL)
6892                                 goto drop_it_no_recycle;
6893
6894                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6895                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6896                         memcpy(skb->data,
6897                                data + TG3_RX_OFFSET(tp),
6898                                len);
6899                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6900                 }
6901
6902                 skb_put(skb, len);
6903                 if (tstamp)
6904                         tg3_hwclock_to_timestamp(tp, tstamp,
6905                                                  skb_hwtstamps(skb));
6906
6907                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6908                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6909                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6910                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6911                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6912                 else
6913                         skb_checksum_none_assert(skb);
6914
6915                 skb->protocol = eth_type_trans(skb, tp->dev);
6916
6917                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6918                     skb->protocol != htons(ETH_P_8021Q) &&
6919                     skb->protocol != htons(ETH_P_8021AD)) {
6920                         dev_kfree_skb_any(skb);
6921                         goto drop_it_no_recycle;
6922                 }
6923
6924                 if (desc->type_flags & RXD_FLAG_VLAN &&
6925                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6926                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6927                                                desc->err_vlan & RXD_VLAN_MASK);
6928
6929                 napi_gro_receive(&tnapi->napi, skb);
6930
6931                 received++;
6932                 budget--;
6933
6934 next_pkt:
6935                 (*post_ptr)++;
6936
6937                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6938                         tpr->rx_std_prod_idx = std_prod_idx &
6939                                                tp->rx_std_ring_mask;
6940                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6941                                      tpr->rx_std_prod_idx);
6942                         work_mask &= ~RXD_OPAQUE_RING_STD;
6943                         rx_std_posted = 0;
6944                 }
6945 next_pkt_nopost:
6946                 sw_idx++;
6947                 sw_idx &= tp->rx_ret_ring_mask;
6948
6949                 /* Refresh hw_idx to see if there is new work */
6950                 if (sw_idx == hw_idx) {
6951                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6952                         rmb();
6953                 }
6954         }
6955
6956         /* ACK the status ring. */
6957         tnapi->rx_rcb_ptr = sw_idx;
6958         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6959
6960         /* Refill RX ring(s). */
6961         if (!tg3_flag(tp, ENABLE_RSS)) {
6962                 /* Sync BD data before updating mailbox */
6963                 wmb();
6964
6965                 if (work_mask & RXD_OPAQUE_RING_STD) {
6966                         tpr->rx_std_prod_idx = std_prod_idx &
6967                                                tp->rx_std_ring_mask;
6968                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6969                                      tpr->rx_std_prod_idx);
6970                 }
6971                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6972                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6973                                                tp->rx_jmb_ring_mask;
6974                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6975                                      tpr->rx_jmb_prod_idx);
6976                 }
6977                 mmiowb();
6978         } else if (work_mask) {
6979                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6980                  * updated before the producer indices can be updated.
6981                  */
6982                 smp_wmb();
6983
6984                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6985                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6986
6987                 if (tnapi != &tp->napi[1]) {
6988                         tp->rx_refill = true;
6989                         napi_schedule(&tp->napi[1].napi);
6990                 }
6991         }
6992
6993         return received;
6994 }
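
/* With RSS enabled, the per-vector producer rings are only shadows:
 * the chip is replenished from tp->napi[0].prodring, so the refill
 * path above merely records the new producer indices and kicks
 * tp->napi[1], whose poll routine moves the fresh buffers over via
 * tg3_rx_prodring_xfer() below.
 */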
6995
6996 static void tg3_poll_link(struct tg3 *tp)
6997 {
6998         /* handle link change and other phy events */
6999         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7000                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7001
7002                 if (sblk->status & SD_STATUS_LINK_CHG) {
7003                         sblk->status = SD_STATUS_UPDATED |
7004                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7005                         spin_lock(&tp->lock);
7006                         if (tg3_flag(tp, USE_PHYLIB)) {
7007                                 tw32_f(MAC_STATUS,
7008                                      (MAC_STATUS_SYNC_CHANGED |
7009                                       MAC_STATUS_CFG_CHANGED |
7010                                       MAC_STATUS_MI_COMPLETION |
7011                                       MAC_STATUS_LNKSTATE_CHANGED));
7012                                 udelay(40);
7013                         } else
7014                                 tg3_setup_phy(tp, false);
7015                         spin_unlock(&tp->lock);
7016                 }
7017         }
7018 }
7019
7020 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7021                                 struct tg3_rx_prodring_set *dpr,
7022                                 struct tg3_rx_prodring_set *spr)
7023 {
7024         u32 si, di, cpycnt, src_prod_idx;
7025         int i, err = 0;
7026
7027         while (1) {
7028                 src_prod_idx = spr->rx_std_prod_idx;
7029
7030                 /* Make sure updates to the rx_std_buffers[] entries and the
7031                  * standard producer index are seen in the correct order.
7032                  */
7033                 smp_rmb();
7034
7035                 if (spr->rx_std_cons_idx == src_prod_idx)
7036                         break;
7037
7038                 if (spr->rx_std_cons_idx < src_prod_idx)
7039                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7040                 else
7041                         cpycnt = tp->rx_std_ring_mask + 1 -
7042                                  spr->rx_std_cons_idx;
7043
7044                 cpycnt = min(cpycnt,
7045                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7046
7047                 si = spr->rx_std_cons_idx;
7048                 di = dpr->rx_std_prod_idx;
7049
7050                 for (i = di; i < di + cpycnt; i++) {
7051                         if (dpr->rx_std_buffers[i].data) {
7052                                 cpycnt = i - di;
7053                                 err = -ENOSPC;
7054                                 break;
7055                         }
7056                 }
7057
7058                 if (!cpycnt)
7059                         break;
7060
7061                 /* Ensure that updates to the rx_std_buffers ring and the
7062                  * shadowed hardware producer ring from tg3_recycle_rx() are
7063                  * ordered correctly WRT the skb check above.
7064                  */
7065                 smp_rmb();
7066
7067                 memcpy(&dpr->rx_std_buffers[di],
7068                        &spr->rx_std_buffers[si],
7069                        cpycnt * sizeof(struct ring_info));
7070
7071                 for (i = 0; i < cpycnt; i++, di++, si++) {
7072                         struct tg3_rx_buffer_desc *sbd, *dbd;
7073                         sbd = &spr->rx_std[si];
7074                         dbd = &dpr->rx_std[di];
7075                         dbd->addr_hi = sbd->addr_hi;
7076                         dbd->addr_lo = sbd->addr_lo;
7077                 }
7078
7079                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7080                                        tp->rx_std_ring_mask;
7081                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7082                                        tp->rx_std_ring_mask;
7083         }
7084
7085         while (1) {
7086                 src_prod_idx = spr->rx_jmb_prod_idx;
7087
7088                 /* Make sure updates to the rx_jmb_buffers[] entries and
7089                  * the jumbo producer index are seen in the correct order.
7090                  */
7091                 smp_rmb();
7092
7093                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7094                         break;
7095
7096                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7097                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7098                 else
7099                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7100                                  spr->rx_jmb_cons_idx;
7101
7102                 cpycnt = min(cpycnt,
7103                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7104
7105                 si = spr->rx_jmb_cons_idx;
7106                 di = dpr->rx_jmb_prod_idx;
7107
7108                 for (i = di; i < di + cpycnt; i++) {
7109                         if (dpr->rx_jmb_buffers[i].data) {
7110                                 cpycnt = i - di;
7111                                 err = -ENOSPC;
7112                                 break;
7113                         }
7114                 }
7115
7116                 if (!cpycnt)
7117                         break;
7118
7119                 /* Ensure that updates to the rx_jmb_buffers ring and the
7120                  * shadowed hardware producer ring from tg3_recycle_skb() are
7121                  * ordered correctly WRT the skb check above.
7122                  */
7123                 smp_rmb();
7124
7125                 memcpy(&dpr->rx_jmb_buffers[di],
7126                        &spr->rx_jmb_buffers[si],
7127                        cpycnt * sizeof(struct ring_info));
7128
7129                 for (i = 0; i < cpycnt; i++, di++, si++) {
7130                         struct tg3_rx_buffer_desc *sbd, *dbd;
7131                         sbd = &spr->rx_jmb[si].std;
7132                         dbd = &dpr->rx_jmb[di].std;
7133                         dbd->addr_hi = sbd->addr_hi;
7134                         dbd->addr_lo = sbd->addr_lo;
7135                 }
7136
7137                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7138                                        tp->rx_jmb_ring_mask;
7139                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7140                                        tp->rx_jmb_ring_mask;
7141         }
7142
7143         return err;
7144 }
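
/* Illustrative sketch (not used by the driver): the copy loops above
 * rely on the producer rings being power-of-two sized, so the ring
 * masks are (size - 1) and an index wraps with a cheap AND instead of
 * a modulo.  The hypothetical helper below shows that arithmetic in
 * isolation; the name is an assumption for illustration only.
 */
static inline u32 tg3_example_ring_advance(u32 idx, u32 cnt, u32 mask)
{
	/* Equivalent to (idx + cnt) % (mask + 1) when mask + 1 is 2^n */
	return (idx + cnt) & mask;
}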
7145
7146 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7147 {
7148         struct tg3 *tp = tnapi->tp;
7149
7150         /* run TX completion thread */
7151         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7152                 tg3_tx(tnapi);
7153                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7154                         return work_done;
7155         }
7156
7157         if (!tnapi->rx_rcb_prod_idx)
7158                 return work_done;
7159
7160         /* Run the RX thread within the bounds set by NAPI.
7161          * All RX "locking" is done by ensuring outside
7162          * code synchronizes with tg3->napi.poll()
7163          */
7164         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7165                 work_done += tg3_rx(tnapi, budget - work_done);
7166
7167         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7168                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7169                 int i, err = 0;
7170                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7171                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7172
7173                 tp->rx_refill = false;
7174                 for (i = 1; i <= tp->rxq_cnt; i++)
7175                         err |= tg3_rx_prodring_xfer(tp, dpr,
7176                                                     &tp->napi[i].prodring);
7177
7178                 wmb();
7179
7180                 if (std_prod_idx != dpr->rx_std_prod_idx)
7181                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7182                                      dpr->rx_std_prod_idx);
7183
7184                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7185                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7186                                      dpr->rx_jmb_prod_idx);
7187
7188                 mmiowb();
7189
7190                 if (err)
7191                         tw32_f(HOSTCC_MODE, tp->coal_now);
7192         }
7193
7194         return work_done;
7195 }
7196
7197 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7198 {
7199         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7200                 schedule_work(&tp->reset_task);
7201 }
7202
7203 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7204 {
7205         cancel_work_sync(&tp->reset_task);
7206         tg3_flag_clear(tp, RESET_TASK_PENDING);
7207         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7208 }
7209
7210 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7211 {
7212         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7213         struct tg3 *tp = tnapi->tp;
7214         int work_done = 0;
7215         struct tg3_hw_status *sblk = tnapi->hw_status;
7216
7217         while (1) {
7218                 work_done = tg3_poll_work(tnapi, work_done, budget);
7219
7220                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7221                         goto tx_recovery;
7222
7223                 if (unlikely(work_done >= budget))
7224                         break;
7225
7226                 /* tnapi->last_tag is used in the mailbox write below
7227                  * to tell the hw how much work has been processed,
7228                  * so we must read it before checking for more work.
7229                  */
7230                 tnapi->last_tag = sblk->status_tag;
7231                 tnapi->last_irq_tag = tnapi->last_tag;
7232                 rmb();
7233
7234                 /* check for RX/TX work to do */
7235                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7236                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7237
7238                         /* This check is not race-free, but looping again
7239                          * reduces the number of interrupts.
7240                          */
7241                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7242                                 continue;
7243
7244                         napi_complete_done(napi, work_done);
7245                         /* Reenable interrupts. */
7246                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7247
7248                         /* This check is synchronized by napi_schedule()
7249                          * and napi_complete() to close the race condition.
7250                          */
7251                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7252                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7253                                                   HOSTCC_MODE_ENABLE |
7254                                                   tnapi->coal_now);
7255                         }
7256                         mmiowb();
7257                         break;
7258                 }
7259         }
7260
7261         return work_done;
7262
7263 tx_recovery:
7264         /* work_done is guaranteed to be less than budget. */
7265         napi_complete(napi);
7266         tg3_reset_task_schedule(tp);
7267         return work_done;
7268 }
7269
7270 static void tg3_process_error(struct tg3 *tp)
7271 {
7272         u32 val;
7273         bool real_error = false;
7274
7275         if (tg3_flag(tp, ERROR_PROCESSED))
7276                 return;
7277
7278         /* Check Flow Attention register */
7279         val = tr32(HOSTCC_FLOW_ATTN);
7280         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7281                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7282                 real_error = true;
7283         }
7284
7285         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7286                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7287                 real_error = true;
7288         }
7289
7290         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7291                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7292                 real_error = true;
7293         }
7294
7295         if (!real_error)
7296                 return;
7297
7298         tg3_dump_state(tp);
7299
7300         tg3_flag_set(tp, ERROR_PROCESSED);
7301         tg3_reset_task_schedule(tp);
7302 }
7303
7304 static int tg3_poll(struct napi_struct *napi, int budget)
7305 {
7306         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7307         struct tg3 *tp = tnapi->tp;
7308         int work_done = 0;
7309         struct tg3_hw_status *sblk = tnapi->hw_status;
7310
7311         while (1) {
7312                 if (sblk->status & SD_STATUS_ERROR)
7313                         tg3_process_error(tp);
7314
7315                 tg3_poll_link(tp);
7316
7317                 work_done = tg3_poll_work(tnapi, work_done, budget);
7318
7319                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7320                         goto tx_recovery;
7321
7322                 if (unlikely(work_done >= budget))
7323                         break;
7324
7325                 if (tg3_flag(tp, TAGGED_STATUS)) {
7326                         /* tnapi->last_tag is used in tg3_int_reenable() below
7327                          * to tell the hw how much work has been processed,
7328                          * so we must read it before checking for more work.
7329                          */
7330                         tnapi->last_tag = sblk->status_tag;
7331                         tnapi->last_irq_tag = tnapi->last_tag;
7332                         rmb();
7333                 } else
7334                         sblk->status &= ~SD_STATUS_UPDATED;
7335
7336                 if (likely(!tg3_has_work(tnapi))) {
7337                         napi_complete_done(napi, work_done);
7338                         tg3_int_reenable(tnapi);
7339                         break;
7340                 }
7341         }
7342
7343         return work_done;
7344
7345 tx_recovery:
7346         /* work_done is guaranteed to be less than budget. */
7347         napi_complete(napi);
7348         tg3_reset_task_schedule(tp);
7349         return work_done;
7350 }
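
/* Note (illustrative): tg3_poll() additionally polls link state and the
 * SD_STATUS_ERROR bit on every pass, which tg3_poll_msix() above does
 * not; the extra MSI-X vectors only move rx/tx work, so those checks
 * are handled by the first vector's poll routine.
 */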
7351
7352 static void tg3_napi_disable(struct tg3 *tp)
7353 {
7354         int i;
7355
7356         for (i = tp->irq_cnt - 1; i >= 0; i--)
7357                 napi_disable(&tp->napi[i].napi);
7358 }
7359
7360 static void tg3_napi_enable(struct tg3 *tp)
7361 {
7362         int i;
7363
7364         for (i = 0; i < tp->irq_cnt; i++)
7365                 napi_enable(&tp->napi[i].napi);
7366 }
7367
7368 static void tg3_napi_init(struct tg3 *tp)
7369 {
7370         int i;
7371
7372         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7373         for (i = 1; i < tp->irq_cnt; i++)
7374                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7375 }
7376
7377 static void tg3_napi_fini(struct tg3 *tp)
7378 {
7379         int i;
7380
7381         for (i = 0; i < tp->irq_cnt; i++)
7382                 netif_napi_del(&tp->napi[i].napi);
7383 }
7384
7385 static inline void tg3_netif_stop(struct tg3 *tp)
7386 {
7387         netif_trans_update(tp->dev);    /* prevent tx timeout */
7388         tg3_napi_disable(tp);
7389         netif_carrier_off(tp->dev);
7390         netif_tx_disable(tp->dev);
7391 }
7392
7393 /* tp->lock must be held */
7394 static inline void tg3_netif_start(struct tg3 *tp)
7395 {
7396         tg3_ptp_resume(tp);
7397
7398         /* NOTE: unconditional netif_tx_wake_all_queues is only
7399          * appropriate so long as all callers are assured to
7400          * have free tx slots (such as after tg3_init_hw)
7401          */
7402         netif_tx_wake_all_queues(tp->dev);
7403
7404         if (tp->link_up)
7405                 netif_carrier_on(tp->dev);
7406
7407         tg3_napi_enable(tp);
7408         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7409         tg3_enable_ints(tp);
7410 }
7411
7412 static void tg3_irq_quiesce(struct tg3 *tp)
7413         __releases(tp->lock)
7414         __acquires(tp->lock)
7415 {
7416         int i;
7417
7418         BUG_ON(tp->irq_sync);
7419
7420         tp->irq_sync = 1;
7421         smp_mb();
7422
7423         spin_unlock_bh(&tp->lock);
7424
7425         for (i = 0; i < tp->irq_cnt; i++)
7426                 synchronize_irq(tp->napi[i].irq_vec);
7427
7428         spin_lock_bh(&tp->lock);
7429 }
7430
7431 /* Fully shut down all tg3 driver activity elsewhere in the system.
7432  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
7433  * Most of the time this is only necessary when shutting down
7434  * the device.
7435  */
7436 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7437 {
7438         spin_lock_bh(&tp->lock);
7439         if (irq_sync)
7440                 tg3_irq_quiesce(tp);
7441 }
7442
7443 static inline void tg3_full_unlock(struct tg3 *tp)
7444 {
7445         spin_unlock_bh(&tp->lock);
7446 }
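
/* Usage sketch (hypothetical caller, illustration only): a shutdown
 * path that must also quiesce the IRQ handlers would do
 *
 *	tg3_full_lock(tp, 1);
 *	... reprogram the hardware ...
 *	tg3_full_unlock(tp);
 *
 * while fast paths that only need mutual exclusion pass irq_sync == 0
 * and skip the synchronize_irq() cost in tg3_irq_quiesce().
 */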
7447
7448 /* One-shot MSI handler - the chip automatically disables the interrupt
7449  * after sending the MSI, so the driver doesn't have to do it.
7450  */
7451 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7452 {
7453         struct tg3_napi *tnapi = dev_id;
7454         struct tg3 *tp = tnapi->tp;
7455
7456         prefetch(tnapi->hw_status);
7457         if (tnapi->rx_rcb)
7458                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7459
7460         if (likely(!tg3_irq_sync(tp)))
7461                 napi_schedule(&tnapi->napi);
7462
7463         return IRQ_HANDLED;
7464 }
7465
7466 /* MSI ISR - No need to check for interrupt sharing and no need to
7467  * flush status block and interrupt mailbox. PCI ordering rules
7468  * guarantee that MSI will arrive after the status block.
7469  */
7470 static irqreturn_t tg3_msi(int irq, void *dev_id)
7471 {
7472         struct tg3_napi *tnapi = dev_id;
7473         struct tg3 *tp = tnapi->tp;
7474
7475         prefetch(tnapi->hw_status);
7476         if (tnapi->rx_rcb)
7477                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7478         /*
7479          * Writing any value to intr-mbox-0 clears PCI INTA# and
7480          * chip-internal interrupt pending events.
7481          * Writing non-zero to intr-mbox-0 additionally tells the
7482          * NIC to stop sending us irqs, engaging "in-intr-handler"
7483          * event coalescing.
7484          */
7485         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7486         if (likely(!tg3_irq_sync(tp)))
7487                 napi_schedule(&tnapi->napi);
7488
7489         return IRQ_RETVAL(1);
7490 }
7491
7492 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7493 {
7494         struct tg3_napi *tnapi = dev_id;
7495         struct tg3 *tp = tnapi->tp;
7496         struct tg3_hw_status *sblk = tnapi->hw_status;
7497         unsigned int handled = 1;
7498
7499         /* In INTx mode, the interrupt can arrive at the CPU before the
7500          * status block that was posted prior to it becomes visible.
7501          * Reading the PCI State register will confirm whether the
7502          * interrupt is ours and will flush the status block.
7503          */
7504         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7505                 if (tg3_flag(tp, CHIP_RESETTING) ||
7506                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7507                         handled = 0;
7508                         goto out;
7509                 }
7510         }
7511
7512         /*
7513          * Writing any value to intr-mbox-0 clears PCI INTA# and
7514          * chip-internal interrupt pending events.
7515          * Writing non-zero to intr-mbox-0 additionally tells the
7516          * NIC to stop sending us irqs, engaging "in-intr-handler"
7517          * event coalescing.
7518          *
7519          * Flush the mailbox to de-assert the IRQ immediately to prevent
7520          * spurious interrupts.  The flush impacts performance but
7521          * excessive spurious interrupts can be worse in some cases.
7522          */
7523         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7524         if (tg3_irq_sync(tp))
7525                 goto out;
7526         sblk->status &= ~SD_STATUS_UPDATED;
7527         if (likely(tg3_has_work(tnapi))) {
7528                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7529                 napi_schedule(&tnapi->napi);
7530         } else {
7531                 /* No work, shared interrupt perhaps?  Re-enable
7532                  * interrupts, and flush that PCI write
7533                  */
7534                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7535                                0x00000000);
7536         }
7537 out:
7538         return IRQ_RETVAL(handled);
7539 }
7540
7541 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7542 {
7543         struct tg3_napi *tnapi = dev_id;
7544         struct tg3 *tp = tnapi->tp;
7545         struct tg3_hw_status *sblk = tnapi->hw_status;
7546         unsigned int handled = 1;
7547
7548         /* In INTx mode, the interrupt can arrive at the CPU before the
7549          * status block that was posted prior to it becomes visible.
7550          * Reading the PCI State register will confirm whether the
7551          * interrupt is ours and will flush the status block.
7552          */
7553         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7554                 if (tg3_flag(tp, CHIP_RESETTING) ||
7555                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7556                         handled = 0;
7557                         goto out;
7558                 }
7559         }
7560
7561         /*
7562          * Writing any value to intr-mbox-0 clears PCI INTA# and
7563          * chip-internal interrupt pending events.
7564          * Writing non-zero to intr-mbox-0 additionally tells the
7565          * NIC to stop sending us irqs, engaging "in-intr-handler"
7566          * event coalescing.
7567          *
7568          * Flush the mailbox to de-assert the IRQ immediately to prevent
7569          * spurious interrupts.  The flush impacts performance but
7570          * excessive spurious interrupts can be worse in some cases.
7571          */
7572         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7573
7574         /*
7575          * In a shared interrupt configuration, sometimes other devices'
7576          * interrupts will scream.  We record the current status tag here
7577          * so that the above check can report that the screaming interrupts
7578          * are unhandled.  Eventually they will be silenced.
7579          */
7580         tnapi->last_irq_tag = sblk->status_tag;
7581
7582         if (tg3_irq_sync(tp))
7583                 goto out;
7584
7585         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7586
7587         napi_schedule(&tnapi->napi);
7588
7589 out:
7590         return IRQ_RETVAL(handled);
7591 }
7592
7593 /* ISR for interrupt test */
7594 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7595 {
7596         struct tg3_napi *tnapi = dev_id;
7597         struct tg3 *tp = tnapi->tp;
7598         struct tg3_hw_status *sblk = tnapi->hw_status;
7599
7600         if ((sblk->status & SD_STATUS_UPDATED) ||
7601             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7602                 tg3_disable_ints(tp);
7603                 return IRQ_RETVAL(1);
7604         }
7605         return IRQ_RETVAL(0);
7606 }
7607
7608 #ifdef CONFIG_NET_POLL_CONTROLLER
7609 static void tg3_poll_controller(struct net_device *dev)
7610 {
7611         int i;
7612         struct tg3 *tp = netdev_priv(dev);
7613
7614         if (tg3_irq_sync(tp))
7615                 return;
7616
7617         for (i = 0; i < tp->irq_cnt; i++)
7618                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7619 }
7620 #endif
7621
7622 static void tg3_tx_timeout(struct net_device *dev)
7623 {
7624         struct tg3 *tp = netdev_priv(dev);
7625
7626         if (netif_msg_tx_err(tp)) {
7627                 netdev_err(dev, "transmit timed out, resetting\n");
7628                 tg3_dump_state(tp);
7629         }
7630
7631         tg3_reset_task_schedule(tp);
7632 }
7633
7634 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
7635 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7636 {
7637         u32 base = (u32) mapping & 0xffffffff;
7638
7639         return base + len + 8 < base;
7640 }
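
/* Worked example (illustration only): with base = 0xfffffff0 and
 * len = 16, base + len + 8 = 0x100000008 truncates to 0x00000008 in
 * 32 bits, which is less than base, so the test above reports that
 * the buffer (plus its 8-byte guard) crosses a 4GB boundary.
 */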
7641
7642 /* Test for TSO DMA buffers that cross into regions within MSS bytes
7643  * of any 4GB boundary: 4G, 8G, etc.
7644  */
7645 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7646                                            u32 len, u32 mss)
7647 {
7648         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7649                 u32 base = (u32) mapping & 0xffffffff;
7650
7651                 return ((base + len + (mss & 0x3fff)) < base);
7652         }
7653         return 0;
7654 }
7655
7656 /* Test for DMA addresses > 40-bit */
7657 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7658                                           int len)
7659 {
7660 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7661         if (tg3_flag(tp, 40BIT_DMA_BUG))
7662                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7663         return 0;
7664 #else
7665         return 0;
7666 #endif
7667 }
7668
7669 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7670                                  dma_addr_t mapping, u32 len, u32 flags,
7671                                  u32 mss, u32 vlan)
7672 {
7673         txbd->addr_hi = ((u64) mapping >> 32);
7674         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7675         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7676         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7677 }
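
/* Example (illustration only): for mapping = 0x0000000123456789,
 * addr_hi is written as 0x00000001 and addr_lo as 0x23456789; len
 * shares one 32-bit descriptor word with the flags, and mss shares
 * the other with the VLAN tag, matching the shifts above.
 */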
7678
7679 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7680                             dma_addr_t map, u32 len, u32 flags,
7681                             u32 mss, u32 vlan)
7682 {
7683         struct tg3 *tp = tnapi->tp;
7684         bool hwbug = false;
7685
7686         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7687                 hwbug = true;
7688
7689         if (tg3_4g_overflow_test(map, len))
7690                 hwbug = true;
7691
7692         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7693                 hwbug = true;
7694
7695         if (tg3_40bit_overflow_test(tp, map, len))
7696                 hwbug = true;
7697
7698         if (tp->dma_limit) {
7699                 u32 prvidx = *entry;
7700                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7701                 while (len > tp->dma_limit && *budget) {
7702                         u32 frag_len = tp->dma_limit;
7703                         len -= tp->dma_limit;
7704
7705                         /* Avoid the 8-byte DMA problem */
7706                         if (len <= 8) {
7707                                 len += tp->dma_limit / 2;
7708                                 frag_len = tp->dma_limit / 2;
7709                         }
7710
7711                         tnapi->tx_buffers[*entry].fragmented = true;
7712
7713                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7714                                       frag_len, tmp_flag, mss, vlan);
7715                         *budget -= 1;
7716                         prvidx = *entry;
7717                         *entry = NEXT_TX(*entry);
7718
7719                         map += frag_len;
7720                 }
7721
7722                 if (len) {
7723                         if (*budget) {
7724                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7725                                               len, flags, mss, vlan);
7726                                 *budget -= 1;
7727                                 *entry = NEXT_TX(*entry);
7728                         } else {
7729                                 hwbug = true;
7730                                 tnapi->tx_buffers[prvidx].fragmented = false;
7731                         }
7732                 }
7733         } else {
7734                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7735                               len, flags, mss, vlan);
7736                 *entry = NEXT_TX(*entry);
7737         }
7738
7739         return hwbug;
7740 }
7741
7742 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7743 {
7744         int i;
7745         struct sk_buff *skb;
7746         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7747
7748         skb = txb->skb;
7749         txb->skb = NULL;
7750
7751         pci_unmap_single(tnapi->tp->pdev,
7752                          dma_unmap_addr(txb, mapping),
7753                          skb_headlen(skb),
7754                          PCI_DMA_TODEVICE);
7755
7756         while (txb->fragmented) {
7757                 txb->fragmented = false;
7758                 entry = NEXT_TX(entry);
7759                 txb = &tnapi->tx_buffers[entry];
7760         }
7761
7762         for (i = 0; i <= last; i++) {
7763                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7764
7765                 entry = NEXT_TX(entry);
7766                 txb = &tnapi->tx_buffers[entry];
7767
7768                 pci_unmap_page(tnapi->tp->pdev,
7769                                dma_unmap_addr(txb, mapping),
7770                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7771
7772                 while (txb->fragmented) {
7773                         txb->fragmented = false;
7774                         entry = NEXT_TX(entry);
7775                         txb = &tnapi->tx_buffers[entry];
7776                 }
7777         }
7778 }
7779
7780 /* Work around the 4GB and 40-bit hardware DMA bugs. */
7781 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7782                                        struct sk_buff **pskb,
7783                                        u32 *entry, u32 *budget,
7784                                        u32 base_flags, u32 mss, u32 vlan)
7785 {
7786         struct tg3 *tp = tnapi->tp;
7787         struct sk_buff *new_skb, *skb = *pskb;
7788         dma_addr_t new_addr = 0;
7789         int ret = 0;
7790
7791         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7792                 new_skb = skb_copy(skb, GFP_ATOMIC);
7793         else {
7794                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7795
7796                 new_skb = skb_copy_expand(skb,
7797                                           skb_headroom(skb) + more_headroom,
7798                                           skb_tailroom(skb), GFP_ATOMIC);
7799         }
7800
7801         if (!new_skb) {
7802                 ret = -1;
7803         } else {
7804                 /* New SKB is guaranteed to be linear. */
7805                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7806                                           PCI_DMA_TODEVICE);
7807                 /* Make sure the mapping succeeded */
7808                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7809                         dev_kfree_skb_any(new_skb);
7810                         ret = -1;
7811                 } else {
7812                         u32 save_entry = *entry;
7813
7814                         base_flags |= TXD_FLAG_END;
7815
7816                         tnapi->tx_buffers[*entry].skb = new_skb;
7817                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7818                                            mapping, new_addr);
7819
7820                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7821                                             new_skb->len, base_flags,
7822                                             mss, vlan)) {
7823                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7824                                 dev_kfree_skb_any(new_skb);
7825                                 ret = -1;
7826                         }
7827                 }
7828         }
7829
7830         dev_kfree_skb_any(skb);
7831         *pskb = new_skb;
7832         return ret;
7833 }
7834
7835 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7836 {
7837         /* Check if we will never have enough descriptors,
7838          * as gso_segs can exceed the current ring size
7839          */
7840         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7841 }
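
/* Illustration: tg3_tso_bug() below budgets up to three descriptors
 * per GSO segment (frag_cnt_est = gso_segs * 3), so this check rejects
 * packets whose worst-case descriptor count could never fit in the
 * ring, no matter how many entries are currently free.
 */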
7842
7843 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7844
7845 /* Use GSO to work around all TSO packets that meet HW bug conditions
7846  * indicated in tg3_tx_frag_set()
7847  */
7848 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7849                        struct netdev_queue *txq, struct sk_buff *skb)
7850 {
7851         struct sk_buff *segs, *nskb;
7852         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7853
7854         /* Estimate the number of fragments in the worst case */
7855         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7856                 netif_tx_stop_queue(txq);
7857
7858                 /* netif_tx_stop_queue() must be done before checking
7859                  * tx index in tg3_tx_avail() below, because in
7860                  * tg3_tx(), we update tx index before checking for
7861                  * netif_tx_queue_stopped().
7862                  */
7863                 smp_mb();
7864                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7865                         return NETDEV_TX_BUSY;
7866
7867                 netif_tx_wake_queue(txq);
7868         }
7869
7870         segs = skb_gso_segment(skb, tp->dev->features &
7871                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7872         if (IS_ERR(segs) || !segs)
7873                 goto tg3_tso_bug_end;
7874
7875         do {
7876                 nskb = segs;
7877                 segs = segs->next;
7878                 nskb->next = NULL;
7879                 tg3_start_xmit(nskb, tp->dev);
7880         } while (segs);
7881
7882 tg3_tso_bug_end:
7883         dev_kfree_skb_any(skb);
7884
7885         return NETDEV_TX_OK;
7886 }
7887
7888 /* hard_start_xmit for all devices */
7889 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7890 {
7891         struct tg3 *tp = netdev_priv(dev);
7892         u32 len, entry, base_flags, mss, vlan = 0;
7893         u32 budget;
7894         int i = -1, would_hit_hwbug;
7895         dma_addr_t mapping;
7896         struct tg3_napi *tnapi;
7897         struct netdev_queue *txq;
7898         unsigned int last;
7899         struct iphdr *iph = NULL;
7900         struct tcphdr *tcph = NULL;
7901         __sum16 tcp_csum = 0, ip_csum = 0;
7902         __be16 ip_tot_len = 0;
7903
7904         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7905         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7906         if (tg3_flag(tp, ENABLE_TSS))
7907                 tnapi++;
7908
7909         budget = tg3_tx_avail(tnapi);
7910
7911         /* We are running in BH disabled context with netif_tx_lock
7912          * and TX reclaim runs via tp->napi.poll inside of a software
7913          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7914          * no IRQ context deadlocks to worry about either.  Rejoice!
7915          */
7916         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7917                 if (!netif_tx_queue_stopped(txq)) {
7918                         netif_tx_stop_queue(txq);
7919
7920                         /* This is a hard error, log it. */
7921                         netdev_err(dev,
7922                                    "BUG! Tx Ring full when queue awake!\n");
7923                 }
7924                 return NETDEV_TX_BUSY;
7925         }
7926
7927         entry = tnapi->tx_prod;
7928         base_flags = 0;
7929
7930         mss = skb_shinfo(skb)->gso_size;
7931         if (mss) {
7932                 u32 tcp_opt_len, hdr_len;
7933
7934                 if (skb_cow_head(skb, 0))
7935                         goto drop;
7936
7937                 iph = ip_hdr(skb);
7938                 tcp_opt_len = tcp_optlen(skb);
7939
7940                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7941
7942                 /* HW/FW cannot correctly segment packets that have been
7943                  * VLAN encapsulated.
7944                  */
7945                 if (skb->protocol == htons(ETH_P_8021Q) ||
7946                     skb->protocol == htons(ETH_P_8021AD)) {
7947                         if (tg3_tso_bug_gso_check(tnapi, skb))
7948                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7949                         goto drop;
7950                 }
7951
7952                 if (!skb_is_gso_v6(skb)) {
7953                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7954                             tg3_flag(tp, TSO_BUG)) {
7955                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7956                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7957                                 goto drop;
7958                         }
7959                         ip_csum = iph->check;
7960                         ip_tot_len = iph->tot_len;
7961                         iph->check = 0;
7962                         iph->tot_len = htons(mss + hdr_len);
7963                 }
7964
7965                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7966                                TXD_FLAG_CPU_POST_DMA);
7967
7968                 tcph = tcp_hdr(skb);
7969                 tcp_csum = tcph->check;
7970
7971                 if (tg3_flag(tp, HW_TSO_1) ||
7972                     tg3_flag(tp, HW_TSO_2) ||
7973                     tg3_flag(tp, HW_TSO_3)) {
7974                         tcph->check = 0;
7975                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7976                 } else {
7977                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7978                                                          0, IPPROTO_TCP, 0);
7979                 }
7980
7981                 if (tg3_flag(tp, HW_TSO_3)) {
7982                         mss |= (hdr_len & 0xc) << 12;
7983                         if (hdr_len & 0x10)
7984                                 base_flags |= 0x00000010;
7985                         base_flags |= (hdr_len & 0x3e0) << 5;
7986                 } else if (tg3_flag(tp, HW_TSO_2))
7987                         mss |= hdr_len << 9;
7988                 else if (tg3_flag(tp, HW_TSO_1) ||
7989                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7990                         if (tcp_opt_len || iph->ihl > 5) {
7991                                 int tsflags;
7992
7993                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7994                                 mss |= (tsflags << 11);
7995                         }
7996                 } else {
7997                         if (tcp_opt_len || iph->ihl > 5) {
7998                                 int tsflags;
7999
8000                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8001                                 base_flags |= tsflags << 12;
8002                         }
8003                 }
8004         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8005                 /* HW/FW cannot correctly checksum packets that have been
8006                  * VLAN encapsulated.
8007                  */
8008                 if (skb->protocol == htons(ETH_P_8021Q) ||
8009                     skb->protocol == htons(ETH_P_8021AD)) {
8010                         if (skb_checksum_help(skb))
8011                                 goto drop;
8012                 } else  {
8013                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8014                 }
8015         }
8016
8017         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8018             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8019                 base_flags |= TXD_FLAG_JMB_PKT;
8020
8021         if (skb_vlan_tag_present(skb)) {
8022                 base_flags |= TXD_FLAG_VLAN;
8023                 vlan = skb_vlan_tag_get(skb);
8024         }
8025
8026         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8027             tg3_flag(tp, TX_TSTAMP_EN)) {
8028                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8029                 base_flags |= TXD_FLAG_HWTSTAMP;
8030         }
8031
8032         len = skb_headlen(skb);
8033
8034         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8035         if (pci_dma_mapping_error(tp->pdev, mapping))
8036                 goto drop;
8037
8039         tnapi->tx_buffers[entry].skb = skb;
8040         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8041
8042         would_hit_hwbug = 0;
8043
8044         if (tg3_flag(tp, 5701_DMA_BUG))
8045                 would_hit_hwbug = 1;
8046
8047         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8048                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8049                             mss, vlan)) {
8050                 would_hit_hwbug = 1;
8051         } else if (skb_shinfo(skb)->nr_frags > 0) {
8052                 u32 tmp_mss = mss;
8053
8054                 if (!tg3_flag(tp, HW_TSO_1) &&
8055                     !tg3_flag(tp, HW_TSO_2) &&
8056                     !tg3_flag(tp, HW_TSO_3))
8057                         tmp_mss = 0;
8058
8059                 /* Now loop through additional data
8060                  * fragments, and queue them.
8061                  */
8062                 last = skb_shinfo(skb)->nr_frags - 1;
8063                 for (i = 0; i <= last; i++) {
8064                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8065
8066                         len = skb_frag_size(frag);
8067                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8068                                                    len, DMA_TO_DEVICE);
8069
8070                         tnapi->tx_buffers[entry].skb = NULL;
8071                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8072                                            mapping);
8073                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8074                                 goto dma_error;
8075
8076                         if (!budget ||
8077                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8078                                             len, base_flags |
8079                                             ((i == last) ? TXD_FLAG_END : 0),
8080                                             tmp_mss, vlan)) {
8081                                 would_hit_hwbug = 1;
8082                                 break;
8083                         }
8084                 }
8085         }
8086
8087         if (would_hit_hwbug) {
8088                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8089
8090                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8091                         /* If it's a TSO packet, do GSO instead of
8092                          * allocating and copying to a large linear SKB
8093                          */
8094                         if (ip_tot_len) {
8095                                 iph->check = ip_csum;
8096                                 iph->tot_len = ip_tot_len;
8097                         }
8098                         tcph->check = tcp_csum;
8099                         return tg3_tso_bug(tp, tnapi, txq, skb);
8100                 }
8101
8102                 /* If the workaround fails due to memory/mapping
8103                  * failure, silently drop this packet.
8104                  */
8105                 entry = tnapi->tx_prod;
8106                 budget = tg3_tx_avail(tnapi);
8107                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8108                                                 base_flags, mss, vlan))
8109                         goto drop_nofree;
8110         }
8111
8112         skb_tx_timestamp(skb);
8113         netdev_tx_sent_queue(txq, skb->len);
8114
8115         /* Sync BD data before updating mailbox */
8116         wmb();
8117
8118         tnapi->tx_prod = entry;
8119         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8120                 netif_tx_stop_queue(txq);
8121
8122                 /* netif_tx_stop_queue() must be done before checking
8123                  * tx index in tg3_tx_avail() below, because in
8124                  * tg3_tx(), we update tx index before checking for
8125                  * netif_tx_queue_stopped().
8126                  */
8127                 smp_mb();
8128                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8129                         netif_tx_wake_queue(txq);
8130         }
8131
8132         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8133                 /* Packets are ready, update Tx producer idx on card. */
8134                 tw32_tx_mbox(tnapi->prodmbox, entry);
8135                 mmiowb();
8136         }
8137
8138         return NETDEV_TX_OK;
8139
8140 dma_error:
8141         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8142         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8143 drop:
8144         dev_kfree_skb_any(skb);
8145 drop_nofree:
8146         tp->tx_dropped++;
8147         return NETDEV_TX_OK;
8148 }
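
/* Error-path summary (for the labels above): dma_error unwinds the
 * fragments already mapped via tg3_tx_skb_unmap(), drop frees the skb,
 * and drop_nofree skips the free because on that path
 * tigon3_dma_hwbug_workaround() has already consumed the original skb.
 */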
8149
8150 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8151 {
8152         if (enable) {
8153                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8154                                   MAC_MODE_PORT_MODE_MASK);
8155
8156                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8157
8158                 if (!tg3_flag(tp, 5705_PLUS))
8159                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8160
8161                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8162                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8163                 else
8164                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8165         } else {
8166                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8167
8168                 if (tg3_flag(tp, 5705_PLUS) ||
8169                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8170                     tg3_asic_rev(tp) == ASIC_REV_5700)
8171                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8172         }
8173
8174         tw32(MAC_MODE, tp->mac_mode);
8175         udelay(40);
8176 }
8177
8178 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8179 {
8180         u32 val, bmcr, mac_mode, ptest = 0;
8181
8182         tg3_phy_toggle_apd(tp, false);
8183         tg3_phy_toggle_automdix(tp, false);
8184
8185         if (extlpbk && tg3_phy_set_extloopbk(tp))
8186                 return -EIO;
8187
8188         bmcr = BMCR_FULLDPLX;
8189         switch (speed) {
8190         case SPEED_10:
8191                 break;
8192         case SPEED_100:
8193                 bmcr |= BMCR_SPEED100;
8194                 break;
8195         case SPEED_1000:
8196         default:
8197                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8198                         speed = SPEED_100;
8199                         bmcr |= BMCR_SPEED100;
8200                 } else {
8201                         speed = SPEED_1000;
8202                         bmcr |= BMCR_SPEED1000;
8203                 }
8204         }
8205
8206         if (extlpbk) {
8207                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8208                         tg3_readphy(tp, MII_CTRL1000, &val);
8209                         val |= CTL1000_AS_MASTER |
8210                                CTL1000_ENABLE_MASTER;
8211                         tg3_writephy(tp, MII_CTRL1000, val);
8212                 } else {
8213                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8214                                 MII_TG3_FET_PTEST_TRIM_2;
8215                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8216                 }
8217         } else
8218                 bmcr |= BMCR_LOOPBACK;
8219
8220         tg3_writephy(tp, MII_BMCR, bmcr);
8221
8222         /* The write needs to be flushed for the FETs */
8223         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8224                 tg3_readphy(tp, MII_BMCR, &bmcr);
8225
8226         udelay(40);
8227
8228         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8229             tg3_asic_rev(tp) == ASIC_REV_5785) {
8230                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8231                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8232                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8233
8234                 /* The write needs to be flushed for the AC131 */
8235                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8236         }
8237
8238         /* Reset to prevent losing 1st rx packet intermittently */
8239         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8240             tg3_flag(tp, 5780_CLASS)) {
8241                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8242                 udelay(10);
8243                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8244         }
8245
8246         mac_mode = tp->mac_mode &
8247                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8248         if (speed == SPEED_1000)
8249                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8250         else
8251                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8252
8253         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8254                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8255
8256                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8257                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8258                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8259                         mac_mode |= MAC_MODE_LINK_POLARITY;
8260
8261                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8262                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8263         }
8264
8265         tw32(MAC_MODE, mac_mode);
8266         udelay(40);
8267
8268         return 0;
8269 }
8270
8271 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8272 {
8273         struct tg3 *tp = netdev_priv(dev);
8274
8275         if (features & NETIF_F_LOOPBACK) {
8276                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8277                         return;
8278
8279                 spin_lock_bh(&tp->lock);
8280                 tg3_mac_loopback(tp, true);
8281                 netif_carrier_on(tp->dev);
8282                 spin_unlock_bh(&tp->lock);
8283                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8284         } else {
8285                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8286                         return;
8287
8288                 spin_lock_bh(&tp->lock);
8289                 tg3_mac_loopback(tp, false);
8290                 /* Force link status check */
8291                 tg3_setup_phy(tp, true);
8292                 spin_unlock_bh(&tp->lock);
8293                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8294         }
8295 }
8296
8297 static netdev_features_t tg3_fix_features(struct net_device *dev,
8298         netdev_features_t features)
8299 {
8300         struct tg3 *tp = netdev_priv(dev);
8301
8302         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8303                 features &= ~NETIF_F_ALL_TSO;
8304
8305         return features;
8306 }
8307
8308 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8309 {
8310         netdev_features_t changed = dev->features ^ features;
8311
8312         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8313                 tg3_set_loopback(dev, features);
8314
8315         return 0;
8316 }
8317
8318 static void tg3_rx_prodring_free(struct tg3 *tp,
8319                                  struct tg3_rx_prodring_set *tpr)
8320 {
8321         int i;
8322
8323         if (tpr != &tp->napi[0].prodring) {
8324                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8325                      i = (i + 1) & tp->rx_std_ring_mask)
8326                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8327                                         tp->rx_pkt_map_sz);
8328
8329                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8330                         for (i = tpr->rx_jmb_cons_idx;
8331                              i != tpr->rx_jmb_prod_idx;
8332                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8333                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8334                                                 TG3_RX_JMB_MAP_SZ);
8335                         }
8336                 }
8337
8338                 return;
8339         }
8340
8341         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8342                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8343                                 tp->rx_pkt_map_sz);
8344
8345         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8346                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8347                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8348                                         TG3_RX_JMB_MAP_SZ);
8349         }
8350 }
8351
8352 /* Initialize rx rings for packet processing.
8353  *
8354  * The chip has been shut down and the driver detached from
8355  * the networking stack, so no interrupts or new tx packets will
8356  * end up in the driver.  tp->{tx,}lock are held and thus
8357  * we may not sleep.
8358  */
8359 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8360                                  struct tg3_rx_prodring_set *tpr)
8361 {
8362         u32 i, rx_pkt_dma_sz;
8363
8364         tpr->rx_std_cons_idx = 0;
8365         tpr->rx_std_prod_idx = 0;
8366         tpr->rx_jmb_cons_idx = 0;
8367         tpr->rx_jmb_prod_idx = 0;
8368
8369         if (tpr != &tp->napi[0].prodring) {
8370                 memset(&tpr->rx_std_buffers[0], 0,
8371                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8372                 if (tpr->rx_jmb_buffers)
8373                         memset(&tpr->rx_jmb_buffers[0], 0,
8374                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8375                 goto done;
8376         }
8377
8378         /* Zero out all descriptors. */
8379         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8380
8381         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8382         if (tg3_flag(tp, 5780_CLASS) &&
8383             tp->dev->mtu > ETH_DATA_LEN)
8384                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8385         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8386
8387         /* Initialize invariants of the rings; we only set this
8388          * stuff once.  This works because the card does not
8389          * write into the rx buffer posting rings.
8390          */
8391         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8392                 struct tg3_rx_buffer_desc *rxd;
8393
8394                 rxd = &tpr->rx_std[i];
8395                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8396                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8397                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8398                                (i << RXD_OPAQUE_INDEX_SHIFT));
8399         }
8400
8401         /* Now allocate fresh SKBs for each rx ring. */
8402         for (i = 0; i < tp->rx_pending; i++) {
8403                 unsigned int frag_size;
8404
8405                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8406                                       &frag_size) < 0) {
8407                         netdev_warn(tp->dev,
8408                                     "Using a smaller RX standard ring. Only "
8409                                     "%d out of %d buffers were allocated "
8410                                     "successfully\n", i, tp->rx_pending);
8411                         if (i == 0)
8412                                 goto initfail;
8413                         tp->rx_pending = i;
8414                         break;
8415                 }
8416         }
8417
8418         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8419                 goto done;
8420
8421         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8422
8423         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8424                 goto done;
8425
8426         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8427                 struct tg3_rx_buffer_desc *rxd;
8428
8429                 rxd = &tpr->rx_jmb[i].std;
8430                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8431                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8432                                   RXD_FLAG_JUMBO;
8433                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8434                        (i << RXD_OPAQUE_INDEX_SHIFT));
8435         }
8436
8437         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8438                 unsigned int frag_size;
8439
8440                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8441                                       &frag_size) < 0) {
8442                         netdev_warn(tp->dev,
8443                                     "Using a smaller RX jumbo ring. Only %d "
8444                                     "out of %d buffers were allocated "
8445                                     "successfully\n", i, tp->rx_jumbo_pending);
8446                         if (i == 0)
8447                                 goto initfail;
8448                         tp->rx_jumbo_pending = i;
8449                         break;
8450                 }
8451         }
8452
8453 done:
8454         return 0;
8455
8456 initfail:
8457         tg3_rx_prodring_free(tp, tpr);
8458         return -ENOMEM;
8459 }
8460
8461 static void tg3_rx_prodring_fini(struct tg3 *tp,
8462                                  struct tg3_rx_prodring_set *tpr)
8463 {
8464         kfree(tpr->rx_std_buffers);
8465         tpr->rx_std_buffers = NULL;
8466         kfree(tpr->rx_jmb_buffers);
8467         tpr->rx_jmb_buffers = NULL;
8468         if (tpr->rx_std) {
8469                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8470                                   tpr->rx_std, tpr->rx_std_mapping);
8471                 tpr->rx_std = NULL;
8472         }
8473         if (tpr->rx_jmb) {
8474                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8475                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8476                 tpr->rx_jmb = NULL;
8477         }
8478 }
8479
8480 static int tg3_rx_prodring_init(struct tg3 *tp,
8481                                 struct tg3_rx_prodring_set *tpr)
8482 {
8483         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8484                                       GFP_KERNEL);
8485         if (!tpr->rx_std_buffers)
8486                 return -ENOMEM;
8487
8488         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8489                                          TG3_RX_STD_RING_BYTES(tp),
8490                                          &tpr->rx_std_mapping,
8491                                          GFP_KERNEL);
8492         if (!tpr->rx_std)
8493                 goto err_out;
8494
8495         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8496                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8497                                               GFP_KERNEL);
8498                 if (!tpr->rx_jmb_buffers)
8499                         goto err_out;
8500
8501                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8502                                                  TG3_RX_JMB_RING_BYTES(tp),
8503                                                  &tpr->rx_jmb_mapping,
8504                                                  GFP_KERNEL);
8505                 if (!tpr->rx_jmb)
8506                         goto err_out;
8507         }
8508
8509         return 0;
8510
8511 err_out:
8512         tg3_rx_prodring_fini(tp, tpr);
8513         return -ENOMEM;
8514 }
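
/* Note on the unwind above: tg3_rx_prodring_fini() tolerates a
 * partially initialized set (kfree(NULL) is a no-op and the
 * dma_free_coherent() calls are guarded by NULL checks), so a single
 * err_out label can clean up after any allocation failure.
 */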
8515
8516 /* Free up pending packets in all rx/tx rings.
8517  *
8518  * The chip has been shut down and the driver detached from
8519  * the networking stack, so no interrupts or new tx packets will
8520  * end up in the driver.  tp->{tx,}lock is not held and we are not
8521  * in an interrupt context and thus may sleep.
8522  */
8523 static void tg3_free_rings(struct tg3 *tp)
8524 {
8525         int i, j;
8526
8527         for (j = 0; j < tp->irq_cnt; j++) {
8528                 struct tg3_napi *tnapi = &tp->napi[j];
8529
8530                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8531
8532                 if (!tnapi->tx_buffers)
8533                         continue;
8534
8535                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8536                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8537
8538                         if (!skb)
8539                                 continue;
8540
8541                         tg3_tx_skb_unmap(tnapi, i,
8542                                          skb_shinfo(skb)->nr_frags - 1);
8543
8544                         dev_kfree_skb_any(skb);
8545                 }
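                /* Also reset the queue's byte queue limit (BQL) accounting. */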
8546                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8547         }
8548 }
8549
8550 /* Initialize tx/rx rings for packet processing.
8551  *
8552  * The chip has been shut down and the driver detached from
8553  * the networking stack, so no interrupts or new tx packets will
8554  * end up in the driver.  tp->{tx,}lock is held and thus
8555  * we may not sleep.
8556  */
8557 static int tg3_init_rings(struct tg3 *tp)
8558 {
8559         int i;
8560
8561         /* Free up all the SKBs. */
8562         tg3_free_rings(tp);
8563
8564         for (i = 0; i < tp->irq_cnt; i++) {
8565                 struct tg3_napi *tnapi = &tp->napi[i];
8566
8567                 tnapi->last_tag = 0;
8568                 tnapi->last_irq_tag = 0;
8569                 tnapi->hw_status->status = 0;
8570                 tnapi->hw_status->status_tag = 0;
8571                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8572
8573                 tnapi->tx_prod = 0;
8574                 tnapi->tx_cons = 0;
8575                 if (tnapi->tx_ring)
8576                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8577
8578                 tnapi->rx_rcb_ptr = 0;
8579                 if (tnapi->rx_rcb)
8580                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8581
8582                 if (tnapi->prodring.rx_std &&
8583                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8584                         tg3_free_rings(tp);
8585                         return -ENOMEM;
8586                 }
8587         }
8588
8589         return 0;
8590 }
8591
8592 static void tg3_mem_tx_release(struct tg3 *tp)
8593 {
8594         int i;
8595
8596         for (i = 0; i < tp->irq_max; i++) {
8597                 struct tg3_napi *tnapi = &tp->napi[i];
8598
8599                 if (tnapi->tx_ring) {
8600                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8601                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8602                         tnapi->tx_ring = NULL;
8603                 }
8604
8605                 kfree(tnapi->tx_buffers);
8606                 tnapi->tx_buffers = NULL;
8607         }
8608 }
8609
8610 static int tg3_mem_tx_acquire(struct tg3 *tp)
8611 {
8612         int i;
8613         struct tg3_napi *tnapi = &tp->napi[0];
8614
8615         /* If multivector TSS is enabled, vector 0 does not handle
8616          * tx interrupts.  Don't allocate any resources for it.
8617          */
8618         if (tg3_flag(tp, ENABLE_TSS))
8619                 tnapi++;
8620
8621         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8622                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8623                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8624                 if (!tnapi->tx_buffers)
8625                         goto err_out;
8626
8627                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8628                                                     TG3_TX_RING_BYTES,
8629                                                     &tnapi->tx_desc_mapping,
8630                                                     GFP_KERNEL);
8631                 if (!tnapi->tx_ring)
8632                         goto err_out;
8633         }
8634
8635         return 0;
8636
8637 err_out:
8638         tg3_mem_tx_release(tp);
8639         return -ENOMEM;
8640 }
8641
8642 static void tg3_mem_rx_release(struct tg3 *tp)
8643 {
8644         int i;
8645
8646         for (i = 0; i < tp->irq_max; i++) {
8647                 struct tg3_napi *tnapi = &tp->napi[i];
8648
8649                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8650
8651                 if (!tnapi->rx_rcb)
8652                         continue;
8653
8654                 dma_free_coherent(&tp->pdev->dev,
8655                                   TG3_RX_RCB_RING_BYTES(tp),
8656                                   tnapi->rx_rcb,
8657                                   tnapi->rx_rcb_mapping);
8658                 tnapi->rx_rcb = NULL;
8659         }
8660 }
8661
8662 static int tg3_mem_rx_acquire(struct tg3 *tp)
8663 {
8664         unsigned int i, limit;
8665
8666         limit = tp->rxq_cnt;
8667
8668         /* If RSS is enabled, we need a (dummy) producer ring
8669          * set on vector zero.  This is the true hw prodring.
8670          */
8671         if (tg3_flag(tp, ENABLE_RSS))
8672                 limit++;
8673
8674         for (i = 0; i < limit; i++) {
8675                 struct tg3_napi *tnapi = &tp->napi[i];
8676
8677                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8678                         goto err_out;
8679
8680                 /* If multivector RSS is enabled, vector 0
8681                  * does not handle rx or tx interrupts.
8682                  * Don't allocate any resources for it.
8683                  */
8684                 if (!i && tg3_flag(tp, ENABLE_RSS))
8685                         continue;
8686
8687                 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8688                                                     TG3_RX_RCB_RING_BYTES(tp),
8689                                                     &tnapi->rx_rcb_mapping,
8690                                                     GFP_KERNEL);
8691                 if (!tnapi->rx_rcb)
8692                         goto err_out;
8693         }
8694
8695         return 0;
8696
8697 err_out:
8698         tg3_mem_rx_release(tp);
8699         return -ENOMEM;
8700 }
8701
8702 /*
8703  * Must be invoked with interrupt sources disabled and
8704  * the hardware shut down.
8705  */
8706 static void tg3_free_consistent(struct tg3 *tp)
8707 {
8708         int i;
8709
8710         for (i = 0; i < tp->irq_cnt; i++) {
8711                 struct tg3_napi *tnapi = &tp->napi[i];
8712
8713                 if (tnapi->hw_status) {
8714                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8715                                           tnapi->hw_status,
8716                                           tnapi->status_mapping);
8717                         tnapi->hw_status = NULL;
8718                 }
8719         }
8720
8721         tg3_mem_rx_release(tp);
8722         tg3_mem_tx_release(tp);
8723
8724         /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
8725         tg3_full_lock(tp, 0);
8726         if (tp->hw_stats) {
8727                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8728                                   tp->hw_stats, tp->stats_mapping);
8729                 tp->hw_stats = NULL;
8730         }
8731         tg3_full_unlock(tp);
8732 }
8733
8734 /*
8735  * Must be invoked with interrupt sources disabled and
8736  * the hardware shut down.  Can sleep.
8737  */
8738 static int tg3_alloc_consistent(struct tg3 *tp)
8739 {
8740         int i;
8741
8742         tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8743                                            sizeof(struct tg3_hw_stats),
8744                                            &tp->stats_mapping, GFP_KERNEL);
8745         if (!tp->hw_stats)
8746                 goto err_out;
8747
8748         for (i = 0; i < tp->irq_cnt; i++) {
8749                 struct tg3_napi *tnapi = &tp->napi[i];
8750                 struct tg3_hw_status *sblk;
8751
8752                 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8753                                                        TG3_HW_STATUS_SIZE,
8754                                                        &tnapi->status_mapping,
8755                                                        GFP_KERNEL);
8756                 if (!tnapi->hw_status)
8757                         goto err_out;
8758
8759                 sblk = tnapi->hw_status;
8760
8761                 if (tg3_flag(tp, ENABLE_RSS)) {
8762                         u16 *prodptr = NULL;
8763
8764                         /*
8765                          * When RSS is enabled, the status block format changes
8766                          * slightly.  The "rx_jumbo_consumer", "reserved",
8767                          * and "rx_mini_consumer" members get mapped to the
8768                          * other three rx return ring producer indexes.
8769                          */
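                        /* Vector 1 keeps the normal producer index;
                         * vectors 2-4 borrow the three repurposed
                         * fields, in the order handled below.
                         */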
8770                         switch (i) {
8771                         case 1:
8772                                 prodptr = &sblk->idx[0].rx_producer;
8773                                 break;
8774                         case 2:
8775                                 prodptr = &sblk->rx_jumbo_consumer;
8776                                 break;
8777                         case 3:
8778                                 prodptr = &sblk->reserved;
8779                                 break;
8780                         case 4:
8781                                 prodptr = &sblk->rx_mini_consumer;
8782                                 break;
8783                         }
8784                         tnapi->rx_rcb_prod_idx = prodptr;
8785                 } else {
8786                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8787                 }
8788         }
8789
8790         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8791                 goto err_out;
8792
8793         return 0;
8794
8795 err_out:
8796         tg3_free_consistent(tp);
8797         return -ENOMEM;
8798 }
8799
8800 #define MAX_WAIT_CNT 1000
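/* Polled in 100 usec steps below, so each wait is bounded at ~100 ms. */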
8801
8802 /* To stop a block, clear the enable bit and poll till it
8803  * clears.  tp->lock is held.
8804  */
8805 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8806 {
8807         unsigned int i;
8808         u32 val;
8809
8810         if (tg3_flag(tp, 5705_PLUS)) {
8811                 switch (ofs) {
8812                 case RCVLSC_MODE:
8813                 case DMAC_MODE:
8814                 case MBFREE_MODE:
8815                 case BUFMGR_MODE:
8816                 case MEMARB_MODE:
8817                         /* We can't enable/disable these bits on the
8818                          * 5705/5750, so just report success.
8819                          */
8820                         return 0;
8821
8822                 default:
8823                         break;
8824                 }
8825         }
8826
8827         val = tr32(ofs);
8828         val &= ~enable_bit;
8829         tw32_f(ofs, val);
8830
8831         for (i = 0; i < MAX_WAIT_CNT; i++) {
8832                 if (pci_channel_offline(tp->pdev)) {
8833                         dev_err(&tp->pdev->dev,
8834                                 "tg3_stop_block device offline, "
8835                                 "ofs=%lx enable_bit=%x\n",
8836                                 ofs, enable_bit);
8837                         return -ENODEV;
8838                 }
8839
8840                 udelay(100);
8841                 val = tr32(ofs);
8842                 if ((val & enable_bit) == 0)
8843                         break;
8844         }
8845
8846         if (i == MAX_WAIT_CNT && !silent) {
8847                 dev_err(&tp->pdev->dev,
8848                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8849                         ofs, enable_bit);
8850                 return -ENODEV;
8851         }
8852
8853         return 0;
8854 }
8855
8856 /* tp->lock is held. */
8857 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8858 {
8859         int i, err;
8860
8861         tg3_disable_ints(tp);
8862
8863         if (pci_channel_offline(tp->pdev)) {
8864                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8865                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8866                 err = -ENODEV;
8867                 goto err_no_dev;
8868         }
8869
8870         tp->rx_mode &= ~RX_MODE_ENABLE;
8871         tw32_f(MAC_RX_MODE, tp->rx_mode);
8872         udelay(10);
8873
8874         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8875         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8876         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8877         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8878         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8879         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8880
8881         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8882         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8883         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8884         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8885         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8886         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8887         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8888
8889         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8890         tw32_f(MAC_MODE, tp->mac_mode);
8891         udelay(40);
8892
8893         tp->tx_mode &= ~TX_MODE_ENABLE;
8894         tw32_f(MAC_TX_MODE, tp->tx_mode);
8895
8896         for (i = 0; i < MAX_WAIT_CNT; i++) {
8897                 udelay(100);
8898                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8899                         break;
8900         }
8901         if (i >= MAX_WAIT_CNT) {
8902                 dev_err(&tp->pdev->dev,
8903                         "%s timed out, TX_MODE_ENABLE will not clear "
8904                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8905                 err |= -ENODEV;
8906         }
8907
8908         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8909         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8910         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8911
8912         tw32(FTQ_RESET, 0xffffffff);
8913         tw32(FTQ_RESET, 0x00000000);
8914
8915         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8916         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8917
8918 err_no_dev:
8919         for (i = 0; i < tp->irq_cnt; i++) {
8920                 struct tg3_napi *tnapi = &tp->napi[i];
8921                 if (tnapi->hw_status)
8922                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8923         }
8924
8925         return err;
8926 }
8927
8928 /* Save PCI command register before chip reset */
8929 static void tg3_save_pci_state(struct tg3 *tp)
8930 {
8931         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8932 }
8933
8934 /* Restore PCI state after chip reset */
8935 static void tg3_restore_pci_state(struct tg3 *tp)
8936 {
8937         u32 val;
8938
8939         /* Re-enable indirect register accesses. */
8940         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8941                                tp->misc_host_ctrl);
8942
8943         /* Set MAX PCI retry to zero. */
8944         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8945         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8946             tg3_flag(tp, PCIX_MODE))
8947                 val |= PCISTATE_RETRY_SAME_DMA;
8948         /* Allow reads and writes to the APE register and memory space. */
8949         if (tg3_flag(tp, ENABLE_APE))
8950                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8951                        PCISTATE_ALLOW_APE_SHMEM_WR |
8952                        PCISTATE_ALLOW_APE_PSPACE_WR;
8953         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8954
8955         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8956
8957         if (!tg3_flag(tp, PCI_EXPRESS)) {
8958                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8959                                       tp->pci_cacheline_sz);
8960                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8961                                       tp->pci_lat_timer);
8962         }
8963
8964         /* Make sure PCI-X relaxed ordering bit is clear. */
8965         if (tg3_flag(tp, PCIX_MODE)) {
8966                 u16 pcix_cmd;
8967
8968                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8969                                      &pcix_cmd);
8970                 pcix_cmd &= ~PCI_X_CMD_ERO;
8971                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8972                                       pcix_cmd);
8973         }
8974
8975         if (tg3_flag(tp, 5780_CLASS)) {
8976
8977                 /* Chip reset on 5780 will reset MSI enable bit,
8978                  * so we need to restore it.
8979                  */
8980                 if (tg3_flag(tp, USING_MSI)) {
8981                         u16 ctrl;
8982
8983                         pci_read_config_word(tp->pdev,
8984                                              tp->msi_cap + PCI_MSI_FLAGS,
8985                                              &ctrl);
8986                         pci_write_config_word(tp->pdev,
8987                                               tp->msi_cap + PCI_MSI_FLAGS,
8988                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8989                         val = tr32(MSGINT_MODE);
8990                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8991                 }
8992         }
8993 }
8994
8995 static void tg3_override_clk(struct tg3 *tp)
8996 {
8997         u32 val;
8998
8999         switch (tg3_asic_rev(tp)) {
9000         case ASIC_REV_5717:
9001                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9002                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9003                      TG3_CPMU_MAC_ORIDE_ENABLE);
9004                 break;
9005
9006         case ASIC_REV_5719:
9007         case ASIC_REV_5720:
9008                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9009                 break;
9010
9011         default:
9012                 return;
9013         }
9014 }
9015
9016 static void tg3_restore_clk(struct tg3 *tp)
9017 {
9018         u32 val;
9019
9020         switch (tg3_asic_rev(tp)) {
9021         case ASIC_REV_5717:
9022                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9023                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9024                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9025                 break;
9026
9027         case ASIC_REV_5719:
9028         case ASIC_REV_5720:
9029                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9030                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9031                 break;
9032
9033         default:
9034                 return;
9035         }
9036 }
9037
9038 /* tp->lock is held. */
9039 static int tg3_chip_reset(struct tg3 *tp)
9040         __releases(tp->lock)
9041         __acquires(tp->lock)
9042 {
9043         u32 val;
9044         void (*write_op)(struct tg3 *, u32, u32);
9045         int i, err;
9046
9047         if (!pci_device_is_present(tp->pdev))
9048                 return -ENODEV;
9049
9050         tg3_nvram_lock(tp);
9051
9052         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9053
9054         /* No matching tg3_nvram_unlock() after this because
9055          * the chip reset below will undo the nvram lock.
9056          */
9057         tp->nvram_lock_cnt = 0;
9058
9059         /* GRC_MISC_CFG core clock reset will clear the memory
9060          * enable bit in PCI register 4 and the MSI enable bit
9061          * on some chips, so we save relevant registers here.
9062          */
9063         tg3_save_pci_state(tp);
9064
9065         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9066             tg3_flag(tp, 5755_PLUS))
9067                 tw32(GRC_FASTBOOT_PC, 0);
9068
9069         /*
9070          * We must avoid the readl() that normally takes place.
9071          * It can lock machines, cause machine checks, and do other
9072          * fun things.  So, temporarily disable the 5701
9073          * hardware workaround while we do the reset.
9074          */
9075         write_op = tp->write32;
9076         if (write_op == tg3_write_flush_reg32)
9077                 tp->write32 = tg3_write32;
9078
9079         /* Prevent the irq handler from reading or writing PCI registers
9080          * during chip reset when the memory enable bit in the PCI command
9081          * register may be cleared.  The chip does not generate interrupts
9082          * at this time, but the irq handler may still be called due to irq
9083          * sharing or irqpoll.
9084          */
9085         tg3_flag_set(tp, CHIP_RESETTING);
9086         for (i = 0; i < tp->irq_cnt; i++) {
9087                 struct tg3_napi *tnapi = &tp->napi[i];
9088                 if (tnapi->hw_status) {
9089                         tnapi->hw_status->status = 0;
9090                         tnapi->hw_status->status_tag = 0;
9091                 }
9092                 tnapi->last_tag = 0;
9093                 tnapi->last_irq_tag = 0;
9094         }
9095         smp_mb();
9096
9097         tg3_full_unlock(tp);
9098
9099         for (i = 0; i < tp->irq_cnt; i++)
9100                 synchronize_irq(tp->napi[i].irq_vec);
9101
9102         tg3_full_lock(tp, 0);
9103
9104         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9105                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9106                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9107         }
9108
9109         /* do the reset */
9110         val = GRC_MISC_CFG_CORECLK_RESET;
9111
9112         if (tg3_flag(tp, PCI_EXPRESS)) {
9113                 /* Force PCIe 1.0a mode */
9114                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9115                     !tg3_flag(tp, 57765_PLUS) &&
9116                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9117                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9118                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9119
9120                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9121                         tw32(GRC_MISC_CFG, (1 << 29));
9122                         val |= (1 << 29);
9123                 }
9124         }
9125
9126         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9127                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9128                 tw32(GRC_VCPU_EXT_CTRL,
9129                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9130         }
9131
9132         /* Set the clock to the highest frequency to avoid timeouts. With link
9133          * aware mode, the clock speed could be slow and bootcode does not
9134          * complete within the expected time. Override the clock to allow the
9135          * bootcode to finish sooner and then restore it.
9136          */
9137         tg3_override_clk(tp);
9138
9139         /* Manage GPHY power for all PCIe devices lacking a CPMU. */
9140         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9141                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9142
9143         tw32(GRC_MISC_CFG, val);
9144
9145         /* restore 5701 hardware bug workaround write method */
9146         tp->write32 = write_op;
9147
9148         /* Unfortunately, we have to delay before the PCI read back.
9149          * Some 575X chips will not even respond to a PCI cfg access
9150          * when the reset command is given to the chip.
9151          *
9152          * How do these hardware designers expect things to work
9153          * properly if the PCI write is posted for a long period
9154          * of time?  It is always necessary to have some method by
9155          * which a register read back can occur to push the write
9156          * out which does the reset.
9157          *
9158          * For most tg3 variants the trick below has worked.
9159          * Ho hum...
9160          */
9161         udelay(120);
9162
9163         /* Flush PCI posted writes.  The normal MMIO registers
9164          * are inaccessible at this time, so this is the only
9165          * way to do this reliably (actually, this is no longer
9166          * the case, see above).  I tried to use indirect
9167          * register read/write but this upset some 5701 variants.
9168          */
9169         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9170
9171         udelay(120);
9172
9173         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9174                 u16 val16;
9175
9176                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9177                         int j;
9178                         u32 cfg_val;
9179
9180                         /* Wait for link training to complete.  */
9181                         for (j = 0; j < 5000; j++)
9182                                 udelay(100);
9183
9184                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9185                         pci_write_config_dword(tp->pdev, 0xc4,
9186                                                cfg_val | (1 << 15));
9187                 }
9188
9189                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9190                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9191                 /*
9192                  * Older PCIe devices only support the 128-byte
9193                  * MPS setting.  Enforce the restriction.
9194                  */
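                /* Clearing the 3-bit payload-size field in DEVCTL
                 * selects the 128-byte encoding.
                 */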
9195                 if (!tg3_flag(tp, CPMU_PRESENT))
9196                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9197                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9198
9199                 /* Clear error status */
9200                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9201                                       PCI_EXP_DEVSTA_CED |
9202                                       PCI_EXP_DEVSTA_NFED |
9203                                       PCI_EXP_DEVSTA_FED |
9204                                       PCI_EXP_DEVSTA_URD);
9205         }
9206
9207         tg3_restore_pci_state(tp);
9208
9209         tg3_flag_clear(tp, CHIP_RESETTING);
9210         tg3_flag_clear(tp, ERROR_PROCESSED);
9211
9212         val = 0;
9213         if (tg3_flag(tp, 5780_CLASS))
9214                 val = tr32(MEMARB_MODE);
9215         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9216
9217         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9218                 tg3_stop_fw(tp);
9219                 tw32(0x5000, 0x400);
9220         }
9221
9222         if (tg3_flag(tp, IS_SSB_CORE)) {
9223                 /*
9224                  * BCM4785: In order to avoid repercussions from using the
9225                  * potentially defective internal ROM, stop the Rx RISC CPU,
9226                  * which is not required anyway.
9227                  */
9228                 tg3_stop_fw(tp);
9229                 tg3_halt_cpu(tp, RX_CPU_BASE);
9230         }
9231
9232         err = tg3_poll_fw(tp);
9233         if (err)
9234                 return err;
9235
9236         tw32(GRC_MODE, tp->grc_mode);
9237
9238         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9239                 val = tr32(0xc4);
9240
9241                 tw32(0xc4, val | (1 << 15));
9242         }
9243
9244         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9245             tg3_asic_rev(tp) == ASIC_REV_5705) {
9246                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9247                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9248                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9249                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9250         }
9251
9252         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9253                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9254                 val = tp->mac_mode;
9255         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9256                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9257                 val = tp->mac_mode;
9258         } else
9259                 val = 0;
9260
9261         tw32_f(MAC_MODE, val);
9262         udelay(40);
9263
9264         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9265
9266         tg3_mdio_start(tp);
9267
9268         if (tg3_flag(tp, PCI_EXPRESS) &&
9269             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9270             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9271             !tg3_flag(tp, 57765_PLUS)) {
9272                 val = tr32(0x7c00);
9273
9274                 tw32(0x7c00, val | (1 << 25));
9275         }
9276
9277         tg3_restore_clk(tp);
9278
9279         /* Reprobe ASF enable state.  */
9280         tg3_flag_clear(tp, ENABLE_ASF);
9281         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9282                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9283
9284         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9285         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9286         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9287                 u32 nic_cfg;
9288
9289                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9290                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9291                         tg3_flag_set(tp, ENABLE_ASF);
9292                         tp->last_event_jiffies = jiffies;
9293                         if (tg3_flag(tp, 5750_PLUS))
9294                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9295
9296                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9297                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9298                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9299                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9300                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9301                 }
9302         }
9303
9304         return 0;
9305 }
9306
9307 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9308 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9309 static void __tg3_set_rx_mode(struct net_device *);
9310
9311 /* tp->lock is held. */
9312 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9313 {
9314         int err;
9315
9316         tg3_stop_fw(tp);
9317
9318         tg3_write_sig_pre_reset(tp, kind);
9319
9320         tg3_abort_hw(tp, silent);
9321         err = tg3_chip_reset(tp);
9322
9323         __tg3_set_mac_addr(tp, false);
9324
9325         tg3_write_sig_legacy(tp, kind);
9326         tg3_write_sig_post_reset(tp, kind);
9327
9328         if (tp->hw_stats) {
9329                 /* Save the stats across chip resets... */
9330                 tg3_get_nstats(tp, &tp->net_stats_prev);
9331                 tg3_get_estats(tp, &tp->estats_prev);
9332
9333                 /* And make sure the next sample is new data */
9334                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9335         }
9336
9337         return err;
9338 }
9339
9340 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9341 {
9342         struct tg3 *tp = netdev_priv(dev);
9343         struct sockaddr *addr = p;
9344         int err = 0;
9345         bool skip_mac_1 = false;
9346
9347         if (!is_valid_ether_addr(addr->sa_data))
9348                 return -EADDRNOTAVAIL;
9349
9350         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9351
9352         if (!netif_running(dev))
9353                 return 0;
9354
9355         if (tg3_flag(tp, ENABLE_ASF)) {
9356                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9357
9358                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9359                 addr0_low = tr32(MAC_ADDR_0_LOW);
9360                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9361                 addr1_low = tr32(MAC_ADDR_1_LOW);
9362
9363                 /* Skip MAC addr 1 if ASF is using it. */
9364                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9365                     !(addr1_high == 0 && addr1_low == 0))
9366                         skip_mac_1 = true;
9367         }
9368         spin_lock_bh(&tp->lock);
9369         __tg3_set_mac_addr(tp, skip_mac_1);
9370         __tg3_set_rx_mode(dev);
9371         spin_unlock_bh(&tp->lock);
9372
9373         return err;
9374 }
9375
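/* Write a ring's buffer descriptor control block into NIC SRAM: the
 * host DMA address split into two 32-bit halves, the length/flags
 * word, and (on pre-5705 chips) a NIC-side ring address.
 */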
9376 /* tp->lock is held. */
9377 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9378                            dma_addr_t mapping, u32 maxlen_flags,
9379                            u32 nic_addr)
9380 {
9381         tg3_write_mem(tp,
9382                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9383                       ((u64) mapping >> 32));
9384         tg3_write_mem(tp,
9385                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9386                       ((u64) mapping & 0xffffffff));
9387         tg3_write_mem(tp,
9388                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9389                        maxlen_flags);
9390
9391         if (!tg3_flag(tp, 5705_PLUS))
9392                 tg3_write_mem(tp,
9393                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9394                               nic_addr);
9395 }
9396
9397
9398 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9399 {
9400         int i = 0;
9401
9402         if (!tg3_flag(tp, ENABLE_TSS)) {
9403                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9404                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9405                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9406         } else {
9407                 tw32(HOSTCC_TXCOL_TICKS, 0);
9408                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9409                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9410
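                /* Per-vector coalescing registers are laid out in
                 * 0x18-byte blocks starting at the _VEC1 offsets.
                 */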
9411                 for (; i < tp->txq_cnt; i++) {
9412                         u32 reg;
9413
9414                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9415                         tw32(reg, ec->tx_coalesce_usecs);
9416                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9417                         tw32(reg, ec->tx_max_coalesced_frames);
9418                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9419                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9420                 }
9421         }
9422
9423         for (; i < tp->irq_max - 1; i++) {
9424                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9425                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9426                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9427         }
9428 }
9429
9430 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9431 {
9432         int i = 0;
9433         u32 limit = tp->rxq_cnt;
9434
9435         if (!tg3_flag(tp, ENABLE_RSS)) {
9436                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9437                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9438                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9439                 limit--;
9440         } else {
9441                 tw32(HOSTCC_RXCOL_TICKS, 0);
9442                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9443                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9444         }
9445
9446         for (; i < limit; i++) {
9447                 u32 reg;
9448
9449                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9450                 tw32(reg, ec->rx_coalesce_usecs);
9451                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9452                 tw32(reg, ec->rx_max_coalesced_frames);
9453                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9454                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9455         }
9456
9457         for (; i < tp->irq_max - 1; i++) {
9458                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9459                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9460                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9461         }
9462 }
9463
9464 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9465 {
9466         tg3_coal_tx_init(tp, ec);
9467         tg3_coal_rx_init(tp, ec);
9468
9469         if (!tg3_flag(tp, 5705_PLUS)) {
9470                 u32 val = ec->stats_block_coalesce_usecs;
9471
9472                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9473                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9474
9475                 if (!tp->link_up)
9476                         val = 0;
9477
9478                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9479         }
9480 }
9481
9482 /* tp->lock is held. */
9483 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9484 {
9485         u32 txrcb, limit;
9486
9487         /* Disable all transmit rings but the first. */
9488         if (!tg3_flag(tp, 5705_PLUS))
9489                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9490         else if (tg3_flag(tp, 5717_PLUS))
9491                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9492         else if (tg3_flag(tp, 57765_CLASS) ||
9493                  tg3_asic_rev(tp) == ASIC_REV_5762)
9494                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9495         else
9496                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9497
9498         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9499              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9500                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9501                               BDINFO_FLAGS_DISABLED);
9502 }
9503
9504 /* tp->lock is held. */
9505 static void tg3_tx_rcbs_init(struct tg3 *tp)
9506 {
9507         int i = 0;
9508         u32 txrcb = NIC_SRAM_SEND_RCB;
9509
9510         if (tg3_flag(tp, ENABLE_TSS))
9511                 i++;
9512
9513         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9514                 struct tg3_napi *tnapi = &tp->napi[i];
9515
9516                 if (!tnapi->tx_ring)
9517                         continue;
9518
9519                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9520                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9521                                NIC_SRAM_TX_BUFFER_DESC);
9522         }
9523 }
9524
9525 /* tp->lock is held. */
9526 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9527 {
9528         u32 rxrcb, limit;
9529
9530         /* Disable all receive return rings but the first. */
9531         if (tg3_flag(tp, 5717_PLUS))
9532                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9533         else if (!tg3_flag(tp, 5705_PLUS))
9534                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9535         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9536                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9537                  tg3_flag(tp, 57765_CLASS))
9538                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9539         else
9540                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9541
9542         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9543              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9544                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9545                               BDINFO_FLAGS_DISABLED);
9546 }
9547
9548 /* tp->lock is held. */
9549 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9550 {
9551         int i = 0;
9552         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9553
9554         if (tg3_flag(tp, ENABLE_RSS))
9555                 i++;
9556
9557         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9558                 struct tg3_napi *tnapi = &tp->napi[i];
9559
9560                 if (!tnapi->rx_rcb)
9561                         continue;
9562
9563                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9564                                (tp->rx_ret_ring_mask + 1) <<
9565                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9566         }
9567 }
9568
9569 /* tp->lock is held. */
9570 static void tg3_rings_reset(struct tg3 *tp)
9571 {
9572         int i;
9573         u32 stblk;
9574         struct tg3_napi *tnapi = &tp->napi[0];
9575
9576         tg3_tx_rcbs_disable(tp);
9577
9578         tg3_rx_ret_rcbs_disable(tp);
9579
9580         /* Disable interrupts */
9581         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9582         tp->napi[0].chk_msi_cnt = 0;
9583         tp->napi[0].last_rx_cons = 0;
9584         tp->napi[0].last_tx_cons = 0;
9585
9586         /* Zero mailbox registers. */
9587         if (tg3_flag(tp, SUPPORT_MSIX)) {
9588                 for (i = 1; i < tp->irq_max; i++) {
9589                         tp->napi[i].tx_prod = 0;
9590                         tp->napi[i].tx_cons = 0;
9591                         if (tg3_flag(tp, ENABLE_TSS))
9592                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9593                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9594                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9595                         tp->napi[i].chk_msi_cnt = 0;
9596                         tp->napi[i].last_rx_cons = 0;
9597                         tp->napi[i].last_tx_cons = 0;
9598                 }
9599                 if (!tg3_flag(tp, ENABLE_TSS))
9600                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9601         } else {
9602                 tp->napi[0].tx_prod = 0;
9603                 tp->napi[0].tx_cons = 0;
9604                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9605                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9606         }
9607
9608         /* Make sure the NIC-based send BD rings are disabled. */
9609         if (!tg3_flag(tp, 5705_PLUS)) {
9610                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9611                 for (i = 0; i < 16; i++)
9612                         tw32_tx_mbox(mbox + i * 8, 0);
9613         }
9614
9615         /* Clear status block in RAM. */
9616         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9617
9618         /* Set status block DMA address */
9619         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9620              ((u64) tnapi->status_mapping >> 32));
9621         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9622              ((u64) tnapi->status_mapping & 0xffffffff));
9623
9624         stblk = HOSTCC_STATBLCK_RING1;
9625
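        /* Status block addresses for vectors 1..n occupy consecutive
         * 8-byte (high/low dword) register pairs starting at RING1.
         */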
9626         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9627                 u64 mapping = (u64)tnapi->status_mapping;
9628                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9629                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9630                 stblk += 8;
9631
9632                 /* Clear status block in RAM. */
9633                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9634         }
9635
9636         tg3_tx_rcbs_init(tp);
9637         tg3_rx_ret_rcbs_init(tp);
9638 }
9639
9640 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9641 {
9642         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9643
9644         if (!tg3_flag(tp, 5750_PLUS) ||
9645             tg3_flag(tp, 5780_CLASS) ||
9646             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9647             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9648             tg3_flag(tp, 57765_PLUS))
9649                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9650         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9651                  tg3_asic_rev(tp) == ASIC_REV_5787)
9652                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9653         else
9654                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9655
9656         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9657         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9658
9659         val = min(nic_rep_thresh, host_rep_thresh);
9660         tw32(RCVBDI_STD_THRESH, val);
9661
9662         if (tg3_flag(tp, 57765_PLUS))
9663                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9664
9665         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9666                 return;
9667
9668         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9669
9670         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9671
9672         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9673         tw32(RCVBDI_JUMBO_THRESH, val);
9674
9675         if (tg3_flag(tp, 57765_PLUS))
9676                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9677 }
9678
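/* Bit-reflected CRC-32 (polynomial 0xedb88320) over buf.  The final
 * inversion here is undone again by the ~crc in __tg3_set_rx_mode()
 * below, which wants the raw shift-register value.
 */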
9679 static inline u32 calc_crc(unsigned char *buf, int len)
9680 {
9681         u32 reg;
9682         u32 tmp;
9683         int j, k;
9684
9685         reg = 0xffffffff;
9686
9687         for (j = 0; j < len; j++) {
9688                 reg ^= buf[j];
9689
9690                 for (k = 0; k < 8; k++) {
9691                         tmp = reg & 0x01;
9692
9693                         reg >>= 1;
9694
9695                         if (tmp)
9696                                 reg ^= 0xedb88320;
9697                 }
9698         }
9699
9700         return ~reg;
9701 }
9702
9703 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9704 {
9705         /* accept or reject all multicast frames */
9706         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9707         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9708         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9709         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9710 }
9711
9712 static void __tg3_set_rx_mode(struct net_device *dev)
9713 {
9714         struct tg3 *tp = netdev_priv(dev);
9715         u32 rx_mode;
9716
9717         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9718                                   RX_MODE_KEEP_VLAN_TAG);
9719
9720 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9721         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9722          * flag clear.
9723          */
9724         if (!tg3_flag(tp, ENABLE_ASF))
9725                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9726 #endif
9727
9728         if (dev->flags & IFF_PROMISC) {
9729                 /* Promiscuous mode. */
9730                 rx_mode |= RX_MODE_PROMISC;
9731         } else if (dev->flags & IFF_ALLMULTI) {
9732                 /* Accept all multicast. */
9733                 tg3_set_multi(tp, 1);
9734         } else if (netdev_mc_empty(dev)) {
9735                 /* Reject all multicast. */
9736                 tg3_set_multi(tp, 0);
9737         } else {
9738                 /* Accept one or more multicast(s). */
9739                 struct netdev_hw_addr *ha;
9740                 u32 mc_filter[4] = { 0, };
9741                 u32 regidx;
9742                 u32 bit;
9743                 u32 crc;
9744
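                /* Hash each address into one of 128 filter bits: the
                 * low 7 bits of the inverted CRC select the bit, with
                 * bits 6:5 picking one of the four 32-bit hash
                 * registers and bits 4:0 the bit within it.
                 */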
9745                 netdev_for_each_mc_addr(ha, dev) {
9746                         crc = calc_crc(ha->addr, ETH_ALEN);
9747                         bit = ~crc & 0x7f;
9748                         regidx = (bit & 0x60) >> 5;
9749                         bit &= 0x1f;
9750                         mc_filter[regidx] |= (1 << bit);
9751                 }
9752
9753                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9754                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9755                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9756                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9757         }
9758
9759         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9760                 rx_mode |= RX_MODE_PROMISC;
9761         } else if (!(dev->flags & IFF_PROMISC)) {
9762                 /* Add all entries to the MAC address filter list */
9763                 int i = 0;
9764                 struct netdev_hw_addr *ha;
9765
9766                 netdev_for_each_uc_addr(ha, dev) {
9767                         __tg3_set_one_mac_addr(tp, ha->addr,
9768                                                i + TG3_UCAST_ADDR_IDX(tp));
9769                         i++;
9770                 }
9771         }
9772
9773         if (rx_mode != tp->rx_mode) {
9774                 tp->rx_mode = rx_mode;
9775                 tw32_f(MAC_RX_MODE, rx_mode);
9776                 udelay(10);
9777         }
9778 }
9779
9780 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9781 {
9782         int i;
9783
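        /* ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt. */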
9784         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9785                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9786 }
9787
9788 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9789 {
9790         int i;
9791
9792         if (!tg3_flag(tp, SUPPORT_MSIX))
9793                 return;
9794
9795         if (tp->rxq_cnt == 1) {
9796                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9797                 return;
9798         }
9799
9800         /* Validate table against current IRQ count */
9801         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9802                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9803                         break;
9804         }
9805
9806         if (i != TG3_RSS_INDIR_TBL_SIZE)
9807                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9808 }
9809
9810 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9811 {
9812         int i = 0;
9813         u32 reg = MAC_RSS_INDIR_TBL_0;
9814
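        /* Each 32-bit register packs eight 4-bit indirection-table
         * entries, first entry in the most significant nibble.
         */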
9815         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9816                 u32 val = tp->rss_ind_tbl[i];
9817                 i++;
9818                 for (; i % 8; i++) {
9819                         val <<= 4;
9820                         val |= tp->rss_ind_tbl[i];
9821                 }
9822                 tw32(reg, val);
9823                 reg += 4;
9824         }
9825 }
9826
9827 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9828 {
9829         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9830                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9831         else
9832                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9833 }
9834
9835 /* tp->lock is held. */
9836 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9837 {
9838         u32 val, rdmac_mode;
9839         int i, err, limit;
9840         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9841
9842         tg3_disable_ints(tp);
9843
9844         tg3_stop_fw(tp);
9845
9846         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9847
9848         if (tg3_flag(tp, INIT_COMPLETE))
9849                 tg3_abort_hw(tp, 1);
9850
9851         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9852             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9853                 tg3_phy_pull_config(tp);
9854                 tg3_eee_pull_config(tp, NULL);
9855                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9856         }
9857
9858         /* Enable MAC control of LPI */
9859         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9860                 tg3_setup_eee(tp);
9861
9862         if (reset_phy)
9863                 tg3_phy_reset(tp);
9864
9865         err = tg3_chip_reset(tp);
9866         if (err)
9867                 return err;
9868
9869         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9870
9871         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9872                 val = tr32(TG3_CPMU_CTRL);
9873                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9874                 tw32(TG3_CPMU_CTRL, val);
9875
9876                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9877                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9878                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9879                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9880
9881                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9882                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9883                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9884                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9885
9886                 val = tr32(TG3_CPMU_HST_ACC);
9887                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9888                 val |= CPMU_HST_ACC_MACCLK_6_25;
9889                 tw32(TG3_CPMU_HST_ACC, val);
9890         }
9891
9892         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9893                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9894                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9895                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9896                 tw32(PCIE_PWR_MGMT_THRESH, val);
9897
9898                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9899                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9900
9901                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9902
9903                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9904                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9905         }
9906
9907         if (tg3_flag(tp, L1PLLPD_EN)) {
9908                 u32 grc_mode = tr32(GRC_MODE);
9909
9910                 /* Access the lower 1K of PL PCIE block registers. */
9911                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9912                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9913
9914                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9915                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9916                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9917
9918                 tw32(GRC_MODE, grc_mode);
9919         }
9920
9921         if (tg3_flag(tp, 57765_CLASS)) {
9922                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9923                         u32 grc_mode = tr32(GRC_MODE);
9924
9925                         /* Access the lower 1K of PL PCIE block registers. */
9926                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9927                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9928
9929                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9930                                    TG3_PCIE_PL_LO_PHYCTL5);
9931                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9932                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9933
9934                         tw32(GRC_MODE, grc_mode);
9935                 }
9936
9937                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9938                         u32 grc_mode;
9939
9940                         /* Fix transmit hangs */
9941                         val = tr32(TG3_CPMU_PADRNG_CTL);
9942                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9943                         tw32(TG3_CPMU_PADRNG_CTL, val);
9944
9945                         grc_mode = tr32(GRC_MODE);
9946
9947                         /* Access the lower 1K of DL PCIE block registers. */
9948                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9949                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9950
9951                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9952                                    TG3_PCIE_DL_LO_FTSMAX);
9953                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9954                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9955                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9956
9957                         tw32(GRC_MODE, grc_mode);
9958                 }
9959
9960                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9961                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9962                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9963                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9964         }
9965
9966         /* This works around an issue with Athlon chipsets on
9967          * B3 tigon3 silicon.  This bit has no effect on any
9968          * other revision.  But do not set this on PCI Express
9969          * chips and don't even touch the clocks if the CPMU is present.
9970          */
9971         if (!tg3_flag(tp, CPMU_PRESENT)) {
9972                 if (!tg3_flag(tp, PCI_EXPRESS))
9973                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9974                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9975         }
9976
9977         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9978             tg3_flag(tp, PCIX_MODE)) {
9979                 val = tr32(TG3PCI_PCISTATE);
9980                 val |= PCISTATE_RETRY_SAME_DMA;
9981                 tw32(TG3PCI_PCISTATE, val);
9982         }
9983
9984         if (tg3_flag(tp, ENABLE_APE)) {
9985                 /* Allow reads and writes to the
9986                  * APE register and memory space.
9987                  */
9988                 val = tr32(TG3PCI_PCISTATE);
9989                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9990                        PCISTATE_ALLOW_APE_SHMEM_WR |
9991                        PCISTATE_ALLOW_APE_PSPACE_WR;
9992                 tw32(TG3PCI_PCISTATE, val);
9993         }
9994
9995         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9996                 /* Enable some hw fixes.  */
9997                 val = tr32(TG3PCI_MSI_DATA);
9998                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9999                 tw32(TG3PCI_MSI_DATA, val);
10000         }
10001
10002         /* Descriptor ring init may access the
10003          * NIC SRAM area to set up the TX descriptors, so we
10004          * can only do this after the hardware has been
10005          * successfully reset.
10006          */
10007         err = tg3_init_rings(tp);
10008         if (err)
10009                 return err;
10010
10011         if (tg3_flag(tp, 57765_PLUS)) {
10012                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10013                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10014                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10015                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10016                 if (!tg3_flag(tp, 57765_CLASS) &&
10017                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10018                     tg3_asic_rev(tp) != ASIC_REV_5762)
10019                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10020                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10021         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10022                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10023                 /* This value is determined during the probe-time DMA
10024                  * engine test, tg3_test_dma.
10025                  */
10026                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10027         }
10028
10029         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10030                           GRC_MODE_4X_NIC_SEND_RINGS |
10031                           GRC_MODE_NO_TX_PHDR_CSUM |
10032                           GRC_MODE_NO_RX_PHDR_CSUM);
10033         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10034
10035         /* Pseudo-header checksum is done by hardware logic and not
10036          * the offload processors, so make the chip do the pseudo-
10037          * header checksums on receive.  For transmit it is more
10038          * convenient to do the pseudo-header checksum in software
10039          * as Linux does that on transmit for us in all cases.
10040          */
10041         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10042
10043         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10044         if (tp->rxptpctl)
10045                 tw32(TG3_RX_PTP_CTL,
10046                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10047
10048         if (tg3_flag(tp, PTP_CAPABLE))
10049                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10050
10051         tw32(GRC_MODE, tp->grc_mode | val);
10052
10053         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
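        /* A prescale value of 65 with the 66 MHz clock presumably selects a
         * divide-by-(65 + 1), i.e. a 1 usec tick for the coalescing timers.
         */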
10054         val = tr32(GRC_MISC_CFG);
10055         val &= ~0xff;
10056         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10057         tw32(GRC_MISC_CFG, val);
10058
10059         /* Initialize MBUF/DESC pool. */
10060         if (tg3_flag(tp, 5750_PLUS)) {
10061                 /* Do nothing.  */
10062         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10063                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10064                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10065                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10066                 else
10067                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10068                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10069                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10070         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10071                 int fw_len;
10072
10073                 fw_len = tp->fw_len;
10074                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10075                 tw32(BUFMGR_MB_POOL_ADDR,
10076                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10077                 tw32(BUFMGR_MB_POOL_SIZE,
10078                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10079         }
10080
10081         if (tp->dev->mtu <= ETH_DATA_LEN) {
10082                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10083                      tp->bufmgr_config.mbuf_read_dma_low_water);
10084                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10085                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10086                 tw32(BUFMGR_MB_HIGH_WATER,
10087                      tp->bufmgr_config.mbuf_high_water);
10088         } else {
10089                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10090                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10091                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10092                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10093                 tw32(BUFMGR_MB_HIGH_WATER,
10094                      tp->bufmgr_config.mbuf_high_water_jumbo);
10095         }
10096         tw32(BUFMGR_DMA_LOW_WATER,
10097              tp->bufmgr_config.dma_low_water);
10098         tw32(BUFMGR_DMA_HIGH_WATER,
10099              tp->bufmgr_config.dma_high_water);
10100
10101         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10102         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10103                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10104         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10105             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10106             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10107             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10108                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10109         tw32(BUFMGR_MODE, val);
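        /* Poll up to ~20 ms (2000 iterations x 10 usec) for the buffer
         * manager to report itself enabled.
         */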
10110         for (i = 0; i < 2000; i++) {
10111                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10112                         break;
10113                 udelay(10);
10114         }
10115         if (i >= 2000) {
10116                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10117                 return -ENODEV;
10118         }
10119
10120         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10121                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10122
10123         tg3_setup_rxbd_thresholds(tp);
10124
10125         /* Initialize TG3_BDINFO's at:
10126          *  RCVDBDI_STD_BD:     standard eth size rx ring
10127          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10128          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10129          *
10130          * like so:
10131          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10132          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10133          *                              ring attribute flags
10134          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10135          *
10136          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10137          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10138          *
10139          * The size of each ring is fixed in the firmware, but the location is
10140          * configurable.
10141          */
10142         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10143              ((u64) tpr->rx_std_mapping >> 32));
10144         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10145              ((u64) tpr->rx_std_mapping & 0xffffffff));
10146         if (!tg3_flag(tp, 5717_PLUS))
10147                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10148                      NIC_SRAM_RX_BUFFER_DESC);
10149
10150         /* Disable the mini ring */
10151         if (!tg3_flag(tp, 5705_PLUS))
10152                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10153                      BDINFO_FLAGS_DISABLED);
10154
10155         /* Program the jumbo buffer descriptor ring control
10156          * blocks on those devices that have them.
10157          */
10158         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10159             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10160
10161                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10162                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10163                              ((u64) tpr->rx_jmb_mapping >> 32));
10164                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10165                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10166                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10167                               BDINFO_FLAGS_MAXLEN_SHIFT;
10168                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10169                              val | BDINFO_FLAGS_USE_EXT_RECV);
10170                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10171                             tg3_flag(tp, 57765_CLASS) ||
10172                             tg3_asic_rev(tp) == ASIC_REV_5762)
10173                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10174                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10175                 } else {
10176                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10177                              BDINFO_FLAGS_DISABLED);
10178                 }
10179
10180                 if (tg3_flag(tp, 57765_PLUS)) {
10181                         val = TG3_RX_STD_RING_SIZE(tp);
10182                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10183                         val |= (TG3_RX_STD_DMA_SZ << 2);
10184                 } else
10185                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10186         } else
10187                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10188
10189         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10190
10191         tpr->rx_std_prod_idx = tp->rx_pending;
10192         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10193
10194         tpr->rx_jmb_prod_idx =
10195                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10196         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10197
10198         tg3_rings_reset(tp);
10199
10200         /* Initialize MAC address and backoff seed. */
10201         __tg3_set_mac_addr(tp, false);
10202
10203         /* MTU + ethernet header + FCS + optional VLAN tag */
10204         tw32(MAC_RX_MTU_SIZE,
10205              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
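        /* e.g. a standard 1500-byte MTU gives 1500 + 14 + 4 + 4 = 1522 bytes */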
10206
10207         /* The slot time is changed by tg3_setup_phy if we
10208          * run at gigabit with half duplex.
10209          */
10210         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10211               (6 << TX_LENGTHS_IPG_SHIFT) |
10212               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10213
10214         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10215             tg3_asic_rev(tp) == ASIC_REV_5762)
10216                 val |= tr32(MAC_TX_LENGTHS) &
10217                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10218                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10219
10220         tw32(MAC_TX_LENGTHS, val);
10221
10222         /* Receive rules. */
10223         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10224         tw32(RCVLPC_CONFIG, 0x0181);
10225
10226         /* Calculate RDMAC_MODE setting early, we need it to determine
10227          * the RCVLPC_STATE_ENABLE mask.
10228          */
10229         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10230                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10231                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10232                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10233                       RDMAC_MODE_LNGREAD_ENAB);
10234
10235         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10236                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10237
10238         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10239             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10240             tg3_asic_rev(tp) == ASIC_REV_57780)
10241                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10242                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10243                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10244
10245         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10246             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10247                 if (tg3_flag(tp, TSO_CAPABLE) &&
10248                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10249                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10250                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10251                            !tg3_flag(tp, IS_5788)) {
10252                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10253                 }
10254         }
10255
10256         if (tg3_flag(tp, PCI_EXPRESS))
10257                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10258
10259         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10260                 tp->dma_limit = 0;
10261                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10262                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10263                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10264                 }
10265         }
10266
10267         if (tg3_flag(tp, HW_TSO_1) ||
10268             tg3_flag(tp, HW_TSO_2) ||
10269             tg3_flag(tp, HW_TSO_3))
10270                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10271
10272         if (tg3_flag(tp, 57765_PLUS) ||
10273             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10274             tg3_asic_rev(tp) == ASIC_REV_57780)
10275                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10276
10277         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10278             tg3_asic_rev(tp) == ASIC_REV_5762)
10279                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10280
10281         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10282             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10283             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10284             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10285             tg3_flag(tp, 57765_PLUS)) {
10286                 u32 tgtreg;
10287
10288                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10289                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10290                 else
10291                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10292
10293                 val = tr32(tgtreg);
10294                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10295                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10296                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10297                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10298                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10299                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10300                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10301                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10302                 }
10303                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10304         }
10305
10306         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10307             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10308             tg3_asic_rev(tp) == ASIC_REV_5762) {
10309                 u32 tgtreg;
10310
10311                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10312                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10313                 else
10314                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10315
10316                 val = tr32(tgtreg);
10317                 tw32(tgtreg, val |
10318                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10319                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10320         }
10321
10322         /* Receive/send statistics. */
10323         if (tg3_flag(tp, 5750_PLUS)) {
10324                 val = tr32(RCVLPC_STATS_ENABLE);
10325                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10326                 tw32(RCVLPC_STATS_ENABLE, val);
10327         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10328                    tg3_flag(tp, TSO_CAPABLE)) {
10329                 val = tr32(RCVLPC_STATS_ENABLE);
10330                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10331                 tw32(RCVLPC_STATS_ENABLE, val);
10332         } else {
10333                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10334         }
10335         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10336         tw32(SNDDATAI_STATSENAB, 0xffffff);
10337         tw32(SNDDATAI_STATSCTRL,
10338              (SNDDATAI_SCTRL_ENABLE |
10339               SNDDATAI_SCTRL_FASTUPD));
10340
10341         /* Setup host coalescing engine. */
10342         tw32(HOSTCC_MODE, 0);
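        /* Wait up to ~20 ms for the coalescing engine to report itself
         * disabled before reprogramming it.
         */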
10343         for (i = 0; i < 2000; i++) {
10344                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10345                         break;
10346                 udelay(10);
10347         }
10348
10349         __tg3_set_coalesce(tp, &tp->coal);
10350
10351         if (!tg3_flag(tp, 5705_PLUS)) {
10352                 /* Status/statistics block address.  See tg3_timer,
10353                  * the tg3_periodic_fetch_stats call there, and
10354                  * tg3_get_stats to see how this works for 5705/5750 chips.
10355                  */
10356                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10357                      ((u64) tp->stats_mapping >> 32));
10358                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10359                      ((u64) tp->stats_mapping & 0xffffffff));
10360                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10361
10362                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10363
10364                 /* Clear statistics and status block memory areas */
10365                 for (i = NIC_SRAM_STATS_BLK;
10366                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10367                      i += sizeof(u32)) {
10368                         tg3_write_mem(tp, i, 0);
10369                         udelay(40);
10370                 }
10371         }
10372
10373         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10374
10375         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10376         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10377         if (!tg3_flag(tp, 5705_PLUS))
10378                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10379
10380         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10381                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10382                 /* Reset to prevent intermittently losing the 1st rx packet */
10383                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10384                 udelay(10);
10385         }
10386
10387         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10388                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10389                         MAC_MODE_FHDE_ENABLE;
10390         if (tg3_flag(tp, ENABLE_APE))
10391                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10392         if (!tg3_flag(tp, 5705_PLUS) &&
10393             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10394             tg3_asic_rev(tp) != ASIC_REV_5700)
10395                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10396         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10397         udelay(40);
10398
10399         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10400          * If TG3_FLAG_IS_NIC is zero, we should read the
10401          * register to preserve the GPIO settings for LOMs. The GPIOs,
10402          * whether used as inputs or outputs, are set by boot code after
10403          * reset.
10404          */
10405         if (!tg3_flag(tp, IS_NIC)) {
10406                 u32 gpio_mask;
10407
10408                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10409                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10410                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10411
10412                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10413                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10414                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10415
10416                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10417                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10418
10419                 tp->grc_local_ctrl &= ~gpio_mask;
10420                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10421
10422                 /* GPIO1 must be driven high for eeprom write protect */
10423                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10424                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10425                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10426         }
10427         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10428         udelay(100);
10429
10430         if (tg3_flag(tp, USING_MSIX)) {
10431                 val = tr32(MSGINT_MODE);
10432                 val |= MSGINT_MODE_ENABLE;
10433                 if (tp->irq_cnt > 1)
10434                         val |= MSGINT_MODE_MULTIVEC_EN;
10435                 if (!tg3_flag(tp, 1SHOT_MSI))
10436                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10437                 tw32(MSGINT_MODE, val);
10438         }
10439
10440         if (!tg3_flag(tp, 5705_PLUS)) {
10441                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10442                 udelay(40);
10443         }
10444
10445         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10446                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10447                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10448                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10449                WDMAC_MODE_LNGREAD_ENAB);
10450
10451         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10452             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10453                 if (tg3_flag(tp, TSO_CAPABLE) &&
10454                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10455                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10456                         /* nothing */
10457                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10458                            !tg3_flag(tp, IS_5788)) {
10459                         val |= WDMAC_MODE_RX_ACCEL;
10460                 }
10461         }
10462
10463         /* Enable host coalescing bug fix */
10464         if (tg3_flag(tp, 5755_PLUS))
10465                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10466
10467         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10468                 val |= WDMAC_MODE_BURST_ALL_DATA;
10469
10470         tw32_f(WDMAC_MODE, val);
10471         udelay(40);
10472
10473         if (tg3_flag(tp, PCIX_MODE)) {
10474                 u16 pcix_cmd;
10475
10476                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10477                                      &pcix_cmd);
10478                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10479                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10480                         pcix_cmd |= PCI_X_CMD_READ_2K;
10481                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10482                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10483                         pcix_cmd |= PCI_X_CMD_READ_2K;
10484                 }
10485                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10486                                       pcix_cmd);
10487         }
10488
10489         tw32_f(RDMAC_MODE, rdmac_mode);
10490         udelay(40);
10491
10492         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10493             tg3_asic_rev(tp) == ASIC_REV_5720) {
10494                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10495                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10496                                 break;
10497                 }
10498                 if (i < TG3_NUM_RDMA_CHANNELS) {
10499                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10500                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10501                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10502                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10503                 }
10504         }
10505
10506         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10507         if (!tg3_flag(tp, 5705_PLUS))
10508                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10509
10510         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10511                 tw32(SNDDATAC_MODE,
10512                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10513         else
10514                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10515
10516         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10517         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10518         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10519         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10520                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10521         tw32(RCVDBDI_MODE, val);
10522         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10523         if (tg3_flag(tp, HW_TSO_1) ||
10524             tg3_flag(tp, HW_TSO_2) ||
10525             tg3_flag(tp, HW_TSO_3))
10526                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10527         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10528         if (tg3_flag(tp, ENABLE_TSS))
10529                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10530         tw32(SNDBDI_MODE, val);
10531         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10532
10533         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10534                 err = tg3_load_5701_a0_firmware_fix(tp);
10535                 if (err)
10536                         return err;
10537         }
10538
10539         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10540                 /* Ignore any errors from the firmware download.  If the
10541                  * download fails, the device will operate with EEE disabled.
10542                  */
10543                 tg3_load_57766_firmware(tp);
10544         }
10545
10546         if (tg3_flag(tp, TSO_CAPABLE)) {
10547                 err = tg3_load_tso_firmware(tp);
10548                 if (err)
10549                         return err;
10550         }
10551
10552         tp->tx_mode = TX_MODE_ENABLE;
10553
10554         if (tg3_flag(tp, 5755_PLUS) ||
10555             tg3_asic_rev(tp) == ASIC_REV_5906)
10556                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10557
10558         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10559             tg3_asic_rev(tp) == ASIC_REV_5762) {
10560                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10561                 tp->tx_mode &= ~val;
10562                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10563         }
10564
10565         tw32_f(MAC_TX_MODE, tp->tx_mode);
10566         udelay(100);
10567
10568         if (tg3_flag(tp, ENABLE_RSS)) {
10569                 u32 rss_key[10];
10570
10571                 tg3_rss_write_indir_tbl(tp);
10572
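                /* Fill rss_key with the kernel's global random RSS key and
                 * program it into the ten 32-bit hash key registers.
                 */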
10573                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10574
10575                 for (i = 0; i < 10 ; i++)
10576                         tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10577         }
10578
10579         tp->rx_mode = RX_MODE_ENABLE;
10580         if (tg3_flag(tp, 5755_PLUS))
10581                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10582
10583         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10584                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10585
10586         if (tg3_flag(tp, ENABLE_RSS))
10587                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10588                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10589                                RX_MODE_RSS_IPV6_HASH_EN |
10590                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10591                                RX_MODE_RSS_IPV4_HASH_EN |
10592                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10593
10594         tw32_f(MAC_RX_MODE, tp->rx_mode);
10595         udelay(10);
10596
10597         tw32(MAC_LED_CTRL, tp->led_ctrl);
10598
10599         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10600         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10601                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10602                 udelay(10);
10603         }
10604         tw32_f(MAC_RX_MODE, tp->rx_mode);
10605         udelay(10);
10606
10607         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10608                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10609                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10610                         /* Set the drive transmission level to 1.2V, but
10611                          * only if the signal pre-emphasis bit is not set. */
10612                         val = tr32(MAC_SERDES_CFG);
10613                         val &= 0xfffff000;
10614                         val |= 0x880;
10615                         tw32(MAC_SERDES_CFG, val);
10616                 }
10617                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10618                         tw32(MAC_SERDES_CFG, 0x616000);
10619         }
10620
10621         /* Prevent the chip from dropping frames when flow control
10622          * is enabled.
10623          */
10624         if (tg3_flag(tp, 57765_CLASS))
10625                 val = 1;
10626         else
10627                 val = 2;
10628         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10629
10630         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10631             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10632                 /* Use hardware link auto-negotiation */
10633                 tg3_flag_set(tp, HW_AUTONEG);
10634         }
10635
10636         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10637             tg3_asic_rev(tp) == ASIC_REV_5714) {
10638                 u32 tmp;
10639
10640                 tmp = tr32(SERDES_RX_CTRL);
10641                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10642                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10643                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10644                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10645         }
10646
10647         if (!tg3_flag(tp, USE_PHYLIB)) {
10648                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10649                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10650
10651                 err = tg3_setup_phy(tp, false);
10652                 if (err)
10653                         return err;
10654
10655                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10656                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10657                         u32 tmp;
10658
10659                         /* Clear CRC stats. */
10660                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10661                                 tg3_writephy(tp, MII_TG3_TEST1,
10662                                              tmp | MII_TG3_TEST1_CRC_EN);
10663                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10664                         }
10665                 }
10666         }
10667
10668         __tg3_set_rx_mode(tp->dev);
10669
10670         /* Initialize receive rules. */
10671         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10672         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10673         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10674         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10675
10676         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10677                 limit = 8;
10678         else
10679                 limit = 16;
10680         if (tg3_flag(tp, ENABLE_ASF))
10681                 limit -= 4;
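        /* The switch intentionally falls through, clearing every unused
         * receive rule from 'limit' - 1 down to rule 4; rules 2 and 3 are
         * deliberately left alone (see the commented-out cases) and rules
         * 0 and 1 were programmed above.
         */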
10682         switch (limit) {
10683         case 16:
10684                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10685         case 15:
10686                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10687         case 14:
10688                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10689         case 13:
10690                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10691         case 12:
10692                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10693         case 11:
10694                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10695         case 10:
10696                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10697         case 9:
10698                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10699         case 8:
10700                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10701         case 7:
10702                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10703         case 6:
10704                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10705         case 5:
10706                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10707         case 4:
10708                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10709         case 3:
10710                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10711         case 2:
10712         case 1:
10713
10714         default:
10715                 break;
10716         }
10717
10718         if (tg3_flag(tp, ENABLE_APE))
10719                 /* Write our heartbeat update interval to APE. */
10720                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10721                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10722
10723         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10724
10725         return 0;
10726 }
10727
10728 /* Called at device open time to get the chip ready for
10729  * packet processing.  Invoked with tp->lock held.
10730  */
10731 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10732 {
10733         /* Chip may have been just powered on. If so, the boot code may still
10734          * be running initialization. Wait for it to finish to avoid races in
10735          * accessing the hardware.
10736          */
10737         tg3_enable_register_access(tp);
10738         tg3_poll_fw(tp);
10739
10740         tg3_switch_clocks(tp);
10741
10742         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10743
10744         return tg3_reset_hw(tp, reset_phy);
10745 }
10746
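/* Read each OCIR record out of the APE scratchpad; records with a bad
 * signature or without the ACTIVE flag are zeroed so later consumers
 * can skip them.
 */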
10747 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10748 {
10749         int i;
10750
10751         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10752                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10753
10754                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10755                 off += len;
10756
10757                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10758                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10759                         memset(ocir, 0, TG3_OCIR_LEN);
10760         }
10761 }
10762
10763 /* sysfs attributes for hwmon */
10764 static ssize_t tg3_show_temp(struct device *dev,
10765                              struct device_attribute *devattr, char *buf)
10766 {
10767         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10768         struct tg3 *tp = dev_get_drvdata(dev);
10769         u32 temperature;
10770
10771         spin_lock_bh(&tp->lock);
10772         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10773                                 sizeof(temperature));
10774         spin_unlock_bh(&tp->lock);
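        /* The hwmon sysfs ABI reports temperatures in millidegrees Celsius,
         * hence the multiply by 1000.
         */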
10775         return sprintf(buf, "%u\n", temperature * 1000);
10776 }
10777
10778
10779 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10780                           TG3_TEMP_SENSOR_OFFSET);
10781 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10782                           TG3_TEMP_CAUTION_OFFSET);
10783 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10784                           TG3_TEMP_MAX_OFFSET);
10785
10786 static struct attribute *tg3_attrs[] = {
10787         &sensor_dev_attr_temp1_input.dev_attr.attr,
10788         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10789         &sensor_dev_attr_temp1_max.dev_attr.attr,
10790         NULL
10791 };
10792 ATTRIBUTE_GROUPS(tg3);
10793
10794 static void tg3_hwmon_close(struct tg3 *tp)
10795 {
10796         if (tp->hwmon_dev) {
10797                 hwmon_device_unregister(tp->hwmon_dev);
10798                 tp->hwmon_dev = NULL;
10799         }
10800 }
10801
10802 static void tg3_hwmon_open(struct tg3 *tp)
10803 {
10804         int i;
10805         u32 size = 0;
10806         struct pci_dev *pdev = tp->pdev;
10807         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10808
10809         tg3_sd_scan_scratchpad(tp, ocirs);
10810
10811         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10812                 if (!ocirs[i].src_data_length)
10813                         continue;
10814
10815                 size += ocirs[i].src_hdr_length;
10816                 size += ocirs[i].src_data_length;
10817         }
10818
10819         if (!size)
10820                 return;
10821
10822         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10823                                                           tp, tg3_groups);
10824         if (IS_ERR(tp->hwmon_dev)) {
10825                 tp->hwmon_dev = NULL;
10826                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10827         }
10828 }
10829
10830
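/* Accumulate a 32-bit hardware counter into a 64-bit (high/low) software
 * counter.  If the low word wraps during the add (new low < added value),
 * carry into the high word: e.g. low = 0xffffffff and __val = 1 wraps low
 * to 0, and 0 < 1 triggers the carry.
 */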
10831 #define TG3_STAT_ADD32(PSTAT, REG) \
10832 do {    u32 __val = tr32(REG); \
10833         (PSTAT)->low += __val; \
10834         if ((PSTAT)->low < __val) \
10835                 (PSTAT)->high += 1; \
10836 } while (0)
10837
10838 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10839 {
10840         struct tg3_hw_stats *sp = tp->hw_stats;
10841
10842         if (!tp->link_up)
10843                 return;
10844
10845         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10846         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10847         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10848         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10849         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10850         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10851         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10852         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10853         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10854         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10855         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10856         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10857         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10858         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10859                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10860                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10861                 u32 val;
10862
10863                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10864                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10865                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10866                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10867         }
10868
10869         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10870         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10871         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10872         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10873         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10874         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10875         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10876         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10877         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10878         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10879         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10880         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10881         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10882         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10883
10884         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10885         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10886             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10887             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10888             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10889                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10890         } else {
10891                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10892                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10893                 if (val) {
10894                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10895                         sp->rx_discards.low += val;
10896                         if (sp->rx_discards.low < val)
10897                                 sp->rx_discards.high += 1;
10898                 }
10899                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10900         }
10901         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10902 }
10903
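/* Workaround for missed MSIs: if a vector has work pending but its rx/tx
 * consumer indices have not moved since the previous check, assume the MSI
 * was lost and invoke the handler by hand.
 */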
10904 static void tg3_chk_missed_msi(struct tg3 *tp)
10905 {
10906         u32 i;
10907
10908         for (i = 0; i < tp->irq_cnt; i++) {
10909                 struct tg3_napi *tnapi = &tp->napi[i];
10910
10911                 if (tg3_has_work(tnapi)) {
10912                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10913                             tnapi->last_tx_cons == tnapi->tx_cons) {
10914                                 if (tnapi->chk_msi_cnt < 1) {
10915                                         tnapi->chk_msi_cnt++;
10916                                         return;
10917                                 }
10918                                 tg3_msi(0, tnapi);
10919                         }
10920                 }
10921                 tnapi->chk_msi_cnt = 0;
10922                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10923                 tnapi->last_tx_cons = tnapi->tx_cons;
10924         }
10925 }
10926
10927 static void tg3_timer(unsigned long __opaque)
10928 {
10929         struct tg3 *tp = (struct tg3 *) __opaque;
10930
10931         spin_lock(&tp->lock);
10932
10933         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10934                 spin_unlock(&tp->lock);
10935                 goto restart_timer;
10936         }
10937
10938         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10939             tg3_flag(tp, 57765_CLASS))
10940                 tg3_chk_missed_msi(tp);
10941
10942         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10943                 /* BCM4785: Flush posted writes from GbE to host memory. */
10944                 tr32(HOSTCC_MODE);
10945         }
10946
10947         if (!tg3_flag(tp, TAGGED_STATUS)) {
10948                 /* All of this garbage is here because, when using non-tagged
10949                  * IRQ status, the mailbox/status_block protocol the chip
10950                  * uses with the cpu is race prone.
10951                  */
10952                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10953                         tw32(GRC_LOCAL_CTRL,
10954                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10955                 } else {
10956                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10957                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10958                 }
10959
10960                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10961                         spin_unlock(&tp->lock);
10962                         tg3_reset_task_schedule(tp);
10963                         goto restart_timer;
10964                 }
10965         }
10966
10967         /* This part only runs once per second. */
10968         if (!--tp->timer_counter) {
10969                 if (tg3_flag(tp, 5705_PLUS))
10970                         tg3_periodic_fetch_stats(tp);
10971
10972                 if (tp->setlpicnt && !--tp->setlpicnt)
10973                         tg3_phy_eee_enable(tp);
10974
10975                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10976                         u32 mac_stat;
10977                         int phy_event;
10978
10979                         mac_stat = tr32(MAC_STATUS);
10980
10981                         phy_event = 0;
10982                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10983                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10984                                         phy_event = 1;
10985                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10986                                 phy_event = 1;
10987
10988                         if (phy_event)
10989                                 tg3_setup_phy(tp, false);
10990                 } else if (tg3_flag(tp, POLL_SERDES)) {
10991                         u32 mac_stat = tr32(MAC_STATUS);
10992                         int need_setup = 0;
10993
10994                         if (tp->link_up &&
10995                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10996                                 need_setup = 1;
10997                         }
10998                         if (!tp->link_up &&
10999                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11000                                          MAC_STATUS_SIGNAL_DET))) {
11001                                 need_setup = 1;
11002                         }
11003                         if (need_setup) {
11004                                 if (!tp->serdes_counter) {
11005                                         tw32_f(MAC_MODE,
11006                                              (tp->mac_mode &
11007                                               ~MAC_MODE_PORT_MODE_MASK));
11008                                         udelay(40);
11009                                         tw32_f(MAC_MODE, tp->mac_mode);
11010                                         udelay(40);
11011                                 }
11012                                 tg3_setup_phy(tp, false);
11013                         }
11014                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11015                            tg3_flag(tp, 5780_CLASS)) {
11016                         tg3_serdes_parallel_detect(tp);
11017                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11018                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11019                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11020                                          TG3_CPMU_STATUS_LINK_MASK);
11021
11022                         if (link_up != tp->link_up)
11023                                 tg3_setup_phy(tp, false);
11024                 }
11025
11026                 tp->timer_counter = tp->timer_multiplier;
11027         }
11028
11029         /* Heartbeat is only sent once every 2 seconds.
11030          *
11031          * The heartbeat is to tell the ASF firmware that the host
11032          * driver is still alive.  In the event that the OS crashes,
11033          * ASF needs to reset the hardware to free up the FIFO space
11034          * that may be filled with rx packets destined for the host.
11035          * If the FIFO is full, ASF will no longer function properly.
11036          *
11037          * Unintended resets have been reported on real-time kernels
11038          * where the timer doesn't run on time.  Netpoll will also have
11039          * the same problem.
11040          *
11041          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11042          * to check the ring condition when the heartbeat is expiring
11043          * before doing the reset.  This will prevent most unintended
11044          * resets.
11045          */
11046         if (!--tp->asf_counter) {
11047                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11048                         tg3_wait_for_event_ack(tp);
11049
11050                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11051                                       FWCMD_NICDRV_ALIVE3);
11052                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11053                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11054                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11055
11056                         tg3_generate_fw_event(tp);
11057                 }
11058                 tp->asf_counter = tp->asf_multiplier;
11059         }
11060
11061         spin_unlock(&tp->lock);
11062
11063 restart_timer:
11064         tp->timer.expires = jiffies + tp->timer_offset;
11065         add_timer(&tp->timer);
11066 }
11067
11068 static void tg3_timer_init(struct tg3 *tp)
11069 {
11070         if (tg3_flag(tp, TAGGED_STATUS) &&
11071             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11072             !tg3_flag(tp, 57765_CLASS))
11073                 tp->timer_offset = HZ;
11074         else
11075                 tp->timer_offset = HZ / 10;
11076
11077         BUG_ON(tp->timer_offset > HZ);
11078
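        /* timer_multiplier is the number of timer firings per second, so the
         * once-per-second work in tg3_timer runs when timer_counter reaches
         * zero.
         */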
11079         tp->timer_multiplier = (HZ / tp->timer_offset);
11080         tp->asf_multiplier = (HZ / tp->timer_offset) *
11081                              TG3_FW_UPDATE_FREQ_SEC;
11082
11083         init_timer(&tp->timer);
11084         tp->timer.data = (unsigned long) tp;
11085         tp->timer.function = tg3_timer;
11086 }
11087
11088 static void tg3_timer_start(struct tg3 *tp)
11089 {
11090         tp->asf_counter   = tp->asf_multiplier;
11091         tp->timer_counter = tp->timer_multiplier;
11092
11093         tp->timer.expires = jiffies + tp->timer_offset;
11094         add_timer(&tp->timer);
11095 }
11096
11097 static void tg3_timer_stop(struct tg3 *tp)
11098 {
11099         del_timer_sync(&tp->timer);
11100 }
11101
11102 /* Restart hardware after configuration changes, self-test, etc.
11103  * Invoked with tp->lock held.
11104  */
11105 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11106         __releases(tp->lock)
11107         __acquires(tp->lock)
11108 {
11109         int err;
11110
11111         err = tg3_init_hw(tp, reset_phy);
11112         if (err) {
11113                 netdev_err(tp->dev,
11114                            "Failed to re-initialize device, aborting\n");
11115                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11116                 tg3_full_unlock(tp);
11117                 tg3_timer_stop(tp);
11118                 tp->irq_sync = 0;
11119                 tg3_napi_enable(tp);
11120                 dev_close(tp->dev);
11121                 tg3_full_lock(tp, 0);
11122         }
11123         return err;
11124 }
11125
11126 static void tg3_reset_task(struct work_struct *work)
11127 {
11128         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11129         int err;
11130
11131         rtnl_lock();
11132         tg3_full_lock(tp, 0);
11133
11134         if (!netif_running(tp->dev)) {
11135                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11136                 tg3_full_unlock(tp);
11137                 rtnl_unlock();
11138                 return;
11139         }
11140
11141         tg3_full_unlock(tp);
11142
11143         tg3_phy_stop(tp);
11144
11145         tg3_netif_stop(tp);
11146
11147         tg3_full_lock(tp, 1);
11148
11149         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11150                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11151                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11152                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11153                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11154         }
11155
11156         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11157         err = tg3_init_hw(tp, true);
11158         if (err)
11159                 goto out;
11160
11161         tg3_netif_start(tp);
11162
11163 out:
11164         tg3_full_unlock(tp);
11165
11166         if (!err)
11167                 tg3_phy_start(tp);
11168
11169         tg3_flag_clear(tp, RESET_TASK_PENDING);
11170         rtnl_unlock();
11171 }
11172
11173 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11174 {
11175         irq_handler_t fn;
11176         unsigned long flags;
11177         char *name;
11178         struct tg3_napi *tnapi = &tp->napi[irq_num];
11179
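        /* Build a distinct per-vector name, e.g. "eth0-txrx-1", so the
         * vectors can be told apart in /proc/interrupts.
         */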
11180         if (tp->irq_cnt == 1)
11181                 name = tp->dev->name;
11182         else {
11183                 name = &tnapi->irq_lbl[0];
11184                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11185                         snprintf(name, IFNAMSIZ,
11186                                  "%s-txrx-%d", tp->dev->name, irq_num);
11187                 else if (tnapi->tx_buffers)
11188                         snprintf(name, IFNAMSIZ,
11189                                  "%s-tx-%d", tp->dev->name, irq_num);
11190                 else if (tnapi->rx_rcb)
11191                         snprintf(name, IFNAMSIZ,
11192                                  "%s-rx-%d", tp->dev->name, irq_num);
11193                 else
11194                         snprintf(name, IFNAMSIZ,
11195                                  "%s-%d", tp->dev->name, irq_num);
11196                 name[IFNAMSIZ-1] = 0;
11197         }
11198
11199         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11200                 fn = tg3_msi;
11201                 if (tg3_flag(tp, 1SHOT_MSI))
11202                         fn = tg3_msi_1shot;
11203                 flags = 0;
11204         } else {
11205                 fn = tg3_interrupt;
11206                 if (tg3_flag(tp, TAGGED_STATUS))
11207                         fn = tg3_interrupt_tagged;
11208                 flags = IRQF_SHARED;
11209         }
11210
11211         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11212 }
11213
11214 static int tg3_test_interrupt(struct tg3 *tp)
11215 {
11216         struct tg3_napi *tnapi = &tp->napi[0];
11217         struct net_device *dev = tp->dev;
11218         int err, i, intr_ok = 0;
11219         u32 val;
11220
11221         if (!netif_running(dev))
11222                 return -ENODEV;
11223
11224         tg3_disable_ints(tp);
11225
11226         free_irq(tnapi->irq_vec, tnapi);
11227
11228         /*
11229          * Turn off MSI one-shot mode.  Otherwise this test has no way
11230          * to observe whether the interrupt was delivered.
11231          */
11232         if (tg3_flag(tp, 57765_PLUS)) {
11233                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11234                 tw32(MSGINT_MODE, val);
11235         }
11236
11237         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11238                           IRQF_SHARED, dev->name, tnapi);
11239         if (err)
11240                 return err;
11241
11242         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11243         tg3_enable_ints(tp);
11244
11245         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11246                tnapi->coal_now);
11247
11248         for (i = 0; i < 5; i++) {
11249                 u32 int_mbox, misc_host_ctrl;
11250
11251                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11252                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11253
11254                 if ((int_mbox != 0) ||
11255                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11256                         intr_ok = 1;
11257                         break;
11258                 }
11259
11260                 if (tg3_flag(tp, 57765_PLUS) &&
11261                     tnapi->hw_status->status_tag != tnapi->last_tag)
11262                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11263
11264                 msleep(10);
11265         }
11266
11267         tg3_disable_ints(tp);
11268
11269         free_irq(tnapi->irq_vec, tnapi);
11270
11271         err = tg3_request_irq(tp, 0);
11272
11273         if (err)
11274                 return err;
11275
11276         if (intr_ok) {
11277                 /* Reenable MSI one shot mode. */
11278                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11279                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11280                         tw32(MSGINT_MODE, val);
11281                 }
11282                 return 0;
11283         }
11284
11285         return -EIO;
11286 }
11287
11288 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
11289  * successfully restored.
11290  */
11291 static int tg3_test_msi(struct tg3 *tp)
11292 {
11293         int err;
11294         u16 pci_cmd;
11295
11296         if (!tg3_flag(tp, USING_MSI))
11297                 return 0;
11298
11299         /* Turn off SERR reporting in case MSI terminates with Master
11300          * Abort.
11301          */
11302         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11303         pci_write_config_word(tp->pdev, PCI_COMMAND,
11304                               pci_cmd & ~PCI_COMMAND_SERR);
11305
11306         err = tg3_test_interrupt(tp);
11307
11308         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11309
11310         if (!err)
11311                 return 0;
11312
11313         /* other failures */
11314         if (err != -EIO)
11315                 return err;
11316
11317         /* MSI test failed, go back to INTx mode */
11318         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11319                     "to INTx mode. Please report this failure to the PCI "
11320                     "maintainer and include system chipset information\n");
11321
11322         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11323
11324         pci_disable_msi(tp->pdev);
11325
11326         tg3_flag_clear(tp, USING_MSI);
11327         tp->napi[0].irq_vec = tp->pdev->irq;
11328
11329         err = tg3_request_irq(tp, 0);
11330         if (err)
11331                 return err;
11332
11333         /* Need to reset the chip because the MSI cycle may have terminated
11334          * with Master Abort.
11335          */
11336         tg3_full_lock(tp, 1);
11337
11338         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11339         err = tg3_init_hw(tp, true);
11340
11341         tg3_full_unlock(tp);
11342
11343         if (err)
11344                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11345
11346         return err;
11347 }
11348
11349 static int tg3_request_firmware(struct tg3 *tp)
11350 {
11351         const struct tg3_firmware_hdr *fw_hdr;
11352
11353         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11354                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11355                            tp->fw_needed);
11356                 return -ENOENT;
11357         }
11358
11359         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11360
11361         /* Firmware blob starts with version numbers, followed by
11362          * start address and _full_ length including BSS sections
11363          * (which must be longer than the actual data, of course).
11364          */
11365
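        /* For reference, the header layout (struct tg3_firmware_hdr in
         * tg3.h) is three big-endian words: version, base_addr, len.
         */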
11366         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11367         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11368                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11369                            tp->fw_len, tp->fw_needed);
11370                 release_firmware(tp->fw);
11371                 tp->fw = NULL;
11372                 return -EINVAL;
11373         }
11374
11375         /* We no longer need firmware; we have it. */
11376         tp->fw_needed = NULL;
11377         return 0;
11378 }
11379
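/* Illustrative example (hypothetical values): with rxq_cnt = 4, txq_cnt = 1
 * and irq_max = 5 this returns min(4 + 1, 5) = 5 -- one vector per rx ring
 * plus vector 0, which only handles link interrupts in multiqueue MSI-X
 * mode.
 */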
11380 static u32 tg3_irq_count(struct tg3 *tp)
11381 {
11382         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11383
11384         if (irq_cnt > 1) {
11385                 /* We want as many rx rings enabled as there are cpus.
11386                  * In multiqueue MSI-X mode, the first MSI-X vector
11387                  * only deals with link interrupts, etc., so we add
11388                  * one to the number of vectors we are requesting.
11389                  */
11390                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11391         }
11392
11393         return irq_cnt;
11394 }
11395
11396 static bool tg3_enable_msix(struct tg3 *tp)
11397 {
11398         int i, rc;
11399         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11400
11401         tp->txq_cnt = tp->txq_req;
11402         tp->rxq_cnt = tp->rxq_req;
11403         if (!tp->rxq_cnt)
11404                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11405         if (tp->rxq_cnt > tp->rxq_max)
11406                 tp->rxq_cnt = tp->rxq_max;
11407
11408         /* Disable multiple TX rings by default.  Simple round-robin hardware
11409          * scheduling of the TX rings can cause starvation of rings with
11410          * small packets when other rings have TSO or jumbo packets.
11411          */
11412         if (!tp->txq_req)
11413                 tp->txq_cnt = 1;
11414
11415         tp->irq_cnt = tg3_irq_count(tp);
11416
11417         for (i = 0; i < tp->irq_max; i++) {
11418                 msix_ent[i].entry  = i;
11419                 msix_ent[i].vector = 0;
11420         }
11421
11422         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11423         if (rc < 0) {
11424                 return false;
11425         } else if (rc < tp->irq_cnt) {
11426                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11427                               tp->irq_cnt, rc);
11428                 tp->irq_cnt = rc;
11429                 tp->rxq_cnt = max(rc - 1, 1);
11430                 if (tp->txq_cnt)
11431                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11432         }
11433
11434         for (i = 0; i < tp->irq_max; i++)
11435                 tp->napi[i].irq_vec = msix_ent[i].vector;
11436
11437         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11438                 pci_disable_msix(tp->pdev);
11439                 return false;
11440         }
11441
11442         if (tp->irq_cnt == 1)
11443                 return true;
11444
11445         tg3_flag_set(tp, ENABLE_RSS);
11446
11447         if (tp->txq_cnt > 1)
11448                 tg3_flag_set(tp, ENABLE_TSS);
11449
11450         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11451
11452         return true;
11453 }
11454
11455 static void tg3_ints_init(struct tg3 *tp)
11456 {
11457         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11458             !tg3_flag(tp, TAGGED_STATUS)) {
11459                 /* All MSI supporting chips should support tagged
11460                  * status.  Warn and fall back to INTx if not.
11461                  */
11462                 netdev_warn(tp->dev,
11463                             "MSI without TAGGED_STATUS? Not using MSI\n");
11464                 goto defcfg;
11465         }
11466
11467         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11468                 tg3_flag_set(tp, USING_MSIX);
11469         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11470                 tg3_flag_set(tp, USING_MSI);
11471
11472         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11473                 u32 msi_mode = tr32(MSGINT_MODE);
11474                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11475                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11476                 if (!tg3_flag(tp, 1SHOT_MSI))
11477                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11478                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11479         }
11480 defcfg:
11481         if (!tg3_flag(tp, USING_MSIX)) {
11482                 tp->irq_cnt = 1;
11483                 tp->napi[0].irq_vec = tp->pdev->irq;
11484         }
11485
11486         if (tp->irq_cnt == 1) {
11487                 tp->txq_cnt = 1;
11488                 tp->rxq_cnt = 1;
11489                 netif_set_real_num_tx_queues(tp->dev, 1);
11490                 netif_set_real_num_rx_queues(tp->dev, 1);
11491         }
11492 }
11493
11494 static void tg3_ints_fini(struct tg3 *tp)
11495 {
11496         if (tg3_flag(tp, USING_MSIX))
11497                 pci_disable_msix(tp->pdev);
11498         else if (tg3_flag(tp, USING_MSI))
11499                 pci_disable_msi(tp->pdev);
11500         tg3_flag_clear(tp, USING_MSI);
11501         tg3_flag_clear(tp, USING_MSIX);
11502         tg3_flag_clear(tp, ENABLE_RSS);
11503         tg3_flag_clear(tp, ENABLE_TSS);
11504 }
11505
11506 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11507                      bool init)
11508 {
11509         struct net_device *dev = tp->dev;
11510         int i, err;
11511
11512         /*
11513          * Setup interrupts first so we know how
11514          * many NAPI resources to allocate
11515          */
11516         tg3_ints_init(tp);
11517
11518         tg3_rss_check_indir_tbl(tp);
11519
11520         /* The placement of this call is tied
11521          * to the setup and use of Host TX descriptors.
11522          */
11523         err = tg3_alloc_consistent(tp);
11524         if (err)
11525                 goto out_ints_fini;
11526
11527         tg3_napi_init(tp);
11528
11529         tg3_napi_enable(tp);
11530
11531         for (i = 0; i < tp->irq_cnt; i++) {
11532                 struct tg3_napi *tnapi = &tp->napi[i];
11533                 err = tg3_request_irq(tp, i);
11534                 if (err) {
11535                         for (i--; i >= 0; i--) {
11536                                 tnapi = &tp->napi[i];
11537                                 free_irq(tnapi->irq_vec, tnapi);
11538                         }
11539                         goto out_napi_fini;
11540                 }
11541         }
11542
11543         tg3_full_lock(tp, 0);
11544
11545         if (init)
11546                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11547
11548         err = tg3_init_hw(tp, reset_phy);
11549         if (err) {
11550                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11551                 tg3_free_rings(tp);
11552         }
11553
11554         tg3_full_unlock(tp);
11555
11556         if (err)
11557                 goto out_free_irq;
11558
11559         if (test_irq && tg3_flag(tp, USING_MSI)) {
11560                 err = tg3_test_msi(tp);
11561
11562                 if (err) {
11563                         tg3_full_lock(tp, 0);
11564                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11565                         tg3_free_rings(tp);
11566                         tg3_full_unlock(tp);
11567
11568                         goto out_napi_fini;
11569                 }
11570
11571                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11572                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11573
11574                         tw32(PCIE_TRANSACTION_CFG,
11575                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11576                 }
11577         }
11578
11579         tg3_phy_start(tp);
11580
11581         tg3_hwmon_open(tp);
11582
11583         tg3_full_lock(tp, 0);
11584
11585         tg3_timer_start(tp);
11586         tg3_flag_set(tp, INIT_COMPLETE);
11587         tg3_enable_ints(tp);
11588
11589         tg3_ptp_resume(tp);
11590
11591         tg3_full_unlock(tp);
11592
11593         netif_tx_start_all_queues(dev);
11594
11595         /*
11596          * Reset the loopback feature if it was turned on while the device
11597          * was down; make sure it is reinstated properly now.
11598          */
11599         if (dev->features & NETIF_F_LOOPBACK)
11600                 tg3_set_loopback(dev, dev->features);
11601
11602         return 0;
11603
11604 out_free_irq:
11605         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11606                 struct tg3_napi *tnapi = &tp->napi[i];
11607                 free_irq(tnapi->irq_vec, tnapi);
11608         }
11609
11610 out_napi_fini:
11611         tg3_napi_disable(tp);
11612         tg3_napi_fini(tp);
11613         tg3_free_consistent(tp);
11614
11615 out_ints_fini:
11616         tg3_ints_fini(tp);
11617
11618         return err;
11619 }
11620
11621 static void tg3_stop(struct tg3 *tp)
11622 {
11623         int i;
11624
11625         tg3_reset_task_cancel(tp);
11626         tg3_netif_stop(tp);
11627
11628         tg3_timer_stop(tp);
11629
11630         tg3_hwmon_close(tp);
11631
11632         tg3_phy_stop(tp);
11633
11634         tg3_full_lock(tp, 1);
11635
11636         tg3_disable_ints(tp);
11637
11638         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11639         tg3_free_rings(tp);
11640         tg3_flag_clear(tp, INIT_COMPLETE);
11641
11642         tg3_full_unlock(tp);
11643
11644         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11645                 struct tg3_napi *tnapi = &tp->napi[i];
11646                 free_irq(tnapi->irq_vec, tnapi);
11647         }
11648
11649         tg3_ints_fini(tp);
11650
11651         tg3_napi_fini(tp);
11652
11653         tg3_free_consistent(tp);
11654 }
11655
11656 static int tg3_open(struct net_device *dev)
11657 {
11658         struct tg3 *tp = netdev_priv(dev);
11659         int err;
11660
11661         if (tp->pcierr_recovery) {
11662                 netdev_err(dev, "Failed to open device. PCI error recovery "
11663                            "in progress\n");
11664                 return -EAGAIN;
11665         }
11666
11667         if (tp->fw_needed) {
11668                 err = tg3_request_firmware(tp);
11669                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11670                         if (err) {
11671                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11672                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11673                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11674                                 netdev_warn(tp->dev, "EEE capability restored\n");
11675                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11676                         }
11677                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11678                         if (err)
11679                                 return err;
11680                 } else if (err) {
11681                         netdev_warn(tp->dev, "TSO capability disabled\n");
11682                         tg3_flag_clear(tp, TSO_CAPABLE);
11683                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11684                         netdev_notice(tp->dev, "TSO capability restored\n");
11685                         tg3_flag_set(tp, TSO_CAPABLE);
11686                 }
11687         }
11688
11689         tg3_carrier_off(tp);
11690
11691         err = tg3_power_up(tp);
11692         if (err)
11693                 return err;
11694
11695         tg3_full_lock(tp, 0);
11696
11697         tg3_disable_ints(tp);
11698         tg3_flag_clear(tp, INIT_COMPLETE);
11699
11700         tg3_full_unlock(tp);
11701
11702         err = tg3_start(tp,
11703                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11704                         true, true);
11705         if (err) {
11706                 tg3_frob_aux_power(tp, false);
11707                 pci_set_power_state(tp->pdev, PCI_D3hot);
11708         }
11709
11710         return err;
11711 }
11712
11713 static int tg3_close(struct net_device *dev)
11714 {
11715         struct tg3 *tp = netdev_priv(dev);
11716
11717         if (tp->pcierr_recovery) {
11718                 netdev_err(dev, "Failed to close device. PCI error recovery "
11719                            "in progress\n");
11720                 return -EAGAIN;
11721         }
11722
11723         tg3_stop(tp);
11724
11725         /* Clear stats across close / open calls */
11726         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11727         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11728
11729         if (pci_device_is_present(tp->pdev)) {
11730                 tg3_power_down_prepare(tp);
11731
11732                 tg3_carrier_off(tp);
11733         }
11734         return 0;
11735 }
11736
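/* Combine the two 32-bit halves of a 64-bit hardware counter. */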
11737 static inline u64 get_stat64(tg3_stat64_t *val)
11738 {
11739         return ((u64)val->high << 32) | ((u64)val->low);
11740 }
11741
11742 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11743 {
11744         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11745
11746         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11747             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11748              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11749                 u32 val;
11750
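                /* On 5700/5701 copper devices, FCS/CRC errors are counted
                 * by the PHY's test counter rather than the MAC statistics
                 * block; the counter apparently resets when read, hence
                 * the running phy_crc_errors total below.
                 */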
11751                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11752                         tg3_writephy(tp, MII_TG3_TEST1,
11753                                      val | MII_TG3_TEST1_CRC_EN);
11754                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11755                 } else
11756                         val = 0;
11757
11758                 tp->phy_crc_errors += val;
11759
11760                 return tp->phy_crc_errors;
11761         }
11762
11763         return get_stat64(&hw_stats->rx_fcs_errors);
11764 }
11765
11766 #define ESTAT_ADD(member) \
11767         estats->member =        old_estats->member + \
11768                                 get_stat64(&hw_stats->member)
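/* For example, ESTAT_ADD(rx_octets) expands to
 *   estats->rx_octets = old_estats->rx_octets +
 *                       get_stat64(&hw_stats->rx_octets);
 * i.e. each ethtool counter is the pre-reset total plus the live
 * hardware value.
 */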
11769
11770 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11771 {
11772         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11773         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11774
11775         ESTAT_ADD(rx_octets);
11776         ESTAT_ADD(rx_fragments);
11777         ESTAT_ADD(rx_ucast_packets);
11778         ESTAT_ADD(rx_mcast_packets);
11779         ESTAT_ADD(rx_bcast_packets);
11780         ESTAT_ADD(rx_fcs_errors);
11781         ESTAT_ADD(rx_align_errors);
11782         ESTAT_ADD(rx_xon_pause_rcvd);
11783         ESTAT_ADD(rx_xoff_pause_rcvd);
11784         ESTAT_ADD(rx_mac_ctrl_rcvd);
11785         ESTAT_ADD(rx_xoff_entered);
11786         ESTAT_ADD(rx_frame_too_long_errors);
11787         ESTAT_ADD(rx_jabbers);
11788         ESTAT_ADD(rx_undersize_packets);
11789         ESTAT_ADD(rx_in_length_errors);
11790         ESTAT_ADD(rx_out_length_errors);
11791         ESTAT_ADD(rx_64_or_less_octet_packets);
11792         ESTAT_ADD(rx_65_to_127_octet_packets);
11793         ESTAT_ADD(rx_128_to_255_octet_packets);
11794         ESTAT_ADD(rx_256_to_511_octet_packets);
11795         ESTAT_ADD(rx_512_to_1023_octet_packets);
11796         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11797         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11798         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11799         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11800         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11801
11802         ESTAT_ADD(tx_octets);
11803         ESTAT_ADD(tx_collisions);
11804         ESTAT_ADD(tx_xon_sent);
11805         ESTAT_ADD(tx_xoff_sent);
11806         ESTAT_ADD(tx_flow_control);
11807         ESTAT_ADD(tx_mac_errors);
11808         ESTAT_ADD(tx_single_collisions);
11809         ESTAT_ADD(tx_mult_collisions);
11810         ESTAT_ADD(tx_deferred);
11811         ESTAT_ADD(tx_excessive_collisions);
11812         ESTAT_ADD(tx_late_collisions);
11813         ESTAT_ADD(tx_collide_2times);
11814         ESTAT_ADD(tx_collide_3times);
11815         ESTAT_ADD(tx_collide_4times);
11816         ESTAT_ADD(tx_collide_5times);
11817         ESTAT_ADD(tx_collide_6times);
11818         ESTAT_ADD(tx_collide_7times);
11819         ESTAT_ADD(tx_collide_8times);
11820         ESTAT_ADD(tx_collide_9times);
11821         ESTAT_ADD(tx_collide_10times);
11822         ESTAT_ADD(tx_collide_11times);
11823         ESTAT_ADD(tx_collide_12times);
11824         ESTAT_ADD(tx_collide_13times);
11825         ESTAT_ADD(tx_collide_14times);
11826         ESTAT_ADD(tx_collide_15times);
11827         ESTAT_ADD(tx_ucast_packets);
11828         ESTAT_ADD(tx_mcast_packets);
11829         ESTAT_ADD(tx_bcast_packets);
11830         ESTAT_ADD(tx_carrier_sense_errors);
11831         ESTAT_ADD(tx_discards);
11832         ESTAT_ADD(tx_errors);
11833
11834         ESTAT_ADD(dma_writeq_full);
11835         ESTAT_ADD(dma_write_prioq_full);
11836         ESTAT_ADD(rxbds_empty);
11837         ESTAT_ADD(rx_discards);
11838         ESTAT_ADD(rx_errors);
11839         ESTAT_ADD(rx_threshold_hit);
11840
11841         ESTAT_ADD(dma_readq_full);
11842         ESTAT_ADD(dma_read_prioq_full);
11843         ESTAT_ADD(tx_comp_queue_full);
11844
11845         ESTAT_ADD(ring_set_send_prod_index);
11846         ESTAT_ADD(ring_status_update);
11847         ESTAT_ADD(nic_irqs);
11848         ESTAT_ADD(nic_avoided_irqs);
11849         ESTAT_ADD(nic_tx_threshold_hit);
11850
11851         ESTAT_ADD(mbuf_lwm_thresh_hit);
11852 }
11853
11854 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11855 {
11856         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11857         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11858
11859         stats->rx_packets = old_stats->rx_packets +
11860                 get_stat64(&hw_stats->rx_ucast_packets) +
11861                 get_stat64(&hw_stats->rx_mcast_packets) +
11862                 get_stat64(&hw_stats->rx_bcast_packets);
11863
11864         stats->tx_packets = old_stats->tx_packets +
11865                 get_stat64(&hw_stats->tx_ucast_packets) +
11866                 get_stat64(&hw_stats->tx_mcast_packets) +
11867                 get_stat64(&hw_stats->tx_bcast_packets);
11868
11869         stats->rx_bytes = old_stats->rx_bytes +
11870                 get_stat64(&hw_stats->rx_octets);
11871         stats->tx_bytes = old_stats->tx_bytes +
11872                 get_stat64(&hw_stats->tx_octets);
11873
11874         stats->rx_errors = old_stats->rx_errors +
11875                 get_stat64(&hw_stats->rx_errors);
11876         stats->tx_errors = old_stats->tx_errors +
11877                 get_stat64(&hw_stats->tx_errors) +
11878                 get_stat64(&hw_stats->tx_mac_errors) +
11879                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11880                 get_stat64(&hw_stats->tx_discards);
11881
11882         stats->multicast = old_stats->multicast +
11883                 get_stat64(&hw_stats->rx_mcast_packets);
11884         stats->collisions = old_stats->collisions +
11885                 get_stat64(&hw_stats->tx_collisions);
11886
11887         stats->rx_length_errors = old_stats->rx_length_errors +
11888                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11889                 get_stat64(&hw_stats->rx_undersize_packets);
11890
11891         stats->rx_frame_errors = old_stats->rx_frame_errors +
11892                 get_stat64(&hw_stats->rx_align_errors);
11893         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11894                 get_stat64(&hw_stats->tx_discards);
11895         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11896                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11897
11898         stats->rx_crc_errors = old_stats->rx_crc_errors +
11899                 tg3_calc_crc_errors(tp);
11900
11901         stats->rx_missed_errors = old_stats->rx_missed_errors +
11902                 get_stat64(&hw_stats->rx_discards);
11903
11904         stats->rx_dropped = tp->rx_dropped;
11905         stats->tx_dropped = tp->tx_dropped;
11906 }
11907
11908 static int tg3_get_regs_len(struct net_device *dev)
11909 {
11910         return TG3_REG_BLK_SIZE;
11911 }
11912
11913 static void tg3_get_regs(struct net_device *dev,
11914                 struct ethtool_regs *regs, void *_p)
11915 {
11916         struct tg3 *tp = netdev_priv(dev);
11917
11918         regs->version = 0;
11919
11920         memset(_p, 0, TG3_REG_BLK_SIZE);
11921
11922         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11923                 return;
11924
11925         tg3_full_lock(tp, 0);
11926
11927         tg3_dump_legacy_regs(tp, (u32 *)_p);
11928
11929         tg3_full_unlock(tp);
11930 }
11931
11932 static int tg3_get_eeprom_len(struct net_device *dev)
11933 {
11934         struct tg3 *tp = netdev_priv(dev);
11935
11936         return tp->nvram_size;
11937 }
11938
11939 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11940 {
11941         struct tg3 *tp = netdev_priv(dev);
11942         int ret, cpmu_restore = 0;
11943         u8  *pd;
11944         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11945         __be32 val;
11946
11947         if (tg3_flag(tp, NO_NVRAM))
11948                 return -EINVAL;
11949
11950         offset = eeprom->offset;
11951         len = eeprom->len;
11952         eeprom->len = 0;
11953
11954         eeprom->magic = TG3_EEPROM_MAGIC;
11955
11956         /* Override clock, link aware and link idle modes */
11957         if (tg3_flag(tp, CPMU_PRESENT)) {
11958                 cpmu_val = tr32(TG3_CPMU_CTRL);
11959                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11960                                 CPMU_CTRL_LINK_IDLE_MODE)) {
11961                         tw32(TG3_CPMU_CTRL, cpmu_val &
11962                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
11963                                              CPMU_CTRL_LINK_IDLE_MODE));
11964                         cpmu_restore = 1;
11965                 }
11966         }
11967         tg3_override_clk(tp);
11968
11969         if (offset & 3) {
11970                 /* adjustments to start on required 4 byte boundary */
11971                 b_offset = offset & 3;
11972                 b_count = 4 - b_offset;
11973                 if (b_count > len) {
11974                         /* i.e. offset=1 len=2 */
11975                         b_count = len;
11976                 }
11977                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11978                 if (ret)
11979                         goto eeprom_done;
11980                 memcpy(data, ((char *)&val) + b_offset, b_count);
11981                 len -= b_count;
11982                 offset += b_count;
11983                 eeprom->len += b_count;
11984         }
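
        /* Illustrative example (hypothetical values): offset=5, len=10
         * takes 3 head bytes from the word at offset 4, one aligned word
         * at offset 8, then 3 tail bytes from the word at offset 12 --
         * 10 bytes in total.
         */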
11985
11986         /* read bytes up to the last 4 byte boundary */
11987         pd = &data[eeprom->len];
11988         for (i = 0; i < (len - (len & 3)); i += 4) {
11989                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11990                 if (ret) {
11991                         if (i)
11992                                 i -= 4;
11993                         eeprom->len += i;
11994                         goto eeprom_done;
11995                 }
11996                 memcpy(pd + i, &val, 4);
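                /* NVRAM reads are slow; yield the CPU periodically and
                 * bail out with -EINTR if the user gives up (e.g. ^C on
                 * an ethtool EEPROM dump).
                 */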
11997                 if (need_resched()) {
11998                         if (signal_pending(current)) {
11999                                 eeprom->len += i;
12000                                 ret = -EINTR;
12001                                 goto eeprom_done;
12002                         }
12003                         cond_resched();
12004                 }
12005         }
12006         eeprom->len += i;
12007
12008         if (len & 3) {
12009                 /* read last bytes not ending on 4 byte boundary */
12010                 pd = &data[eeprom->len];
12011                 b_count = len & 3;
12012                 b_offset = offset + len - b_count;
12013                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12014                 if (ret)
12015                         goto eeprom_done;
12016                 memcpy(pd, &val, b_count);
12017                 eeprom->len += b_count;
12018         }
12019         ret = 0;
12020
12021 eeprom_done:
12022         /* Restore clock, link aware and link idle modes */
12023         tg3_restore_clk(tp);
12024         if (cpmu_restore)
12025                 tw32(TG3_CPMU_CTRL, cpmu_val);
12026
12027         return ret;
12028 }
12029
12030 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12031 {
12032         struct tg3 *tp = netdev_priv(dev);
12033         int ret;
12034         u32 offset, len, b_offset, odd_len;
12035         u8 *buf;
12036         __be32 start = 0, end;
12037
12038         if (tg3_flag(tp, NO_NVRAM) ||
12039             eeprom->magic != TG3_EEPROM_MAGIC)
12040                 return -EINVAL;
12041
12042         offset = eeprom->offset;
12043         len = eeprom->len;
12044
12045         if ((b_offset = (offset & 3))) {
12046                 /* adjustments to start on required 4 byte boundary */
12047                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12048                 if (ret)
12049                         return ret;
12050                 len += b_offset;
12051                 offset &= ~3;
12052                 if (len < 4)
12053                         len = 4;
12054         }
12055
12056         odd_len = 0;
12057         if (len & 3) {
12058                 /* adjustments to end on required 4 byte boundary */
12059                 odd_len = 1;
12060                 len = (len + 3) & ~3;
12061                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12062                 if (ret)
12063                         return ret;
12064         }
12065
12066         buf = data;
12067         if (b_offset || odd_len) {
12068                 buf = kmalloc(len, GFP_KERNEL);
12069                 if (!buf)
12070                         return -ENOMEM;
12071                 if (b_offset)
12072                         memcpy(buf, &start, 4);
12073                 if (odd_len)
12074                         memcpy(buf+len-4, &end, 4);
12075                 memcpy(buf + b_offset, data, eeprom->len);
12076         }
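
        /* Illustrative example (hypothetical values): offset=2, len=3
         * gives b_offset=2, so the word at offset 0 is read into 'start',
         * len grows to 5 and is rounded up to 8 with odd_len=1, and the
         * word at offset 4 is read into 'end'.  The 8-byte bounce buffer
         * is seeded with those two words before the caller's 3 bytes are
         * copied in at buf + 2, preserving the untouched bytes across the
         * aligned write below.
         */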
12077
12078         ret = tg3_nvram_write_block(tp, offset, len, buf);
12079
12080         if (buf != data)
12081                 kfree(buf);
12082
12083         return ret;
12084 }
12085
12086 static int tg3_get_link_ksettings(struct net_device *dev,
12087                                   struct ethtool_link_ksettings *cmd)
12088 {
12089         struct tg3 *tp = netdev_priv(dev);
12090         u32 supported, advertising;
12091
12092         if (tg3_flag(tp, USE_PHYLIB)) {
12093                 struct phy_device *phydev;
12094                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12095                         return -EAGAIN;
12096                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12097                 return phy_ethtool_ksettings_get(phydev, cmd);
12098         }
12099
12100         supported = (SUPPORTED_Autoneg);
12101
12102         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12103                 supported |= (SUPPORTED_1000baseT_Half |
12104                               SUPPORTED_1000baseT_Full);
12105
12106         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12107                 supported |= (SUPPORTED_100baseT_Half |
12108                               SUPPORTED_100baseT_Full |
12109                               SUPPORTED_10baseT_Half |
12110                               SUPPORTED_10baseT_Full |
12111                               SUPPORTED_TP);
12112                 cmd->base.port = PORT_TP;
12113         } else {
12114                 supported |= SUPPORTED_FIBRE;
12115                 cmd->base.port = PORT_FIBRE;
12116         }
12117         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12118                                                 supported);
12119
12120         advertising = tp->link_config.advertising;
12121         if (tg3_flag(tp, PAUSE_AUTONEG)) {
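                /* Map the local flow control settings onto the standard
                 * pause advertisement bits (IEEE 802.3 Annex 28B):
                 * rx+tx advertises Pause, rx-only Pause|Asym_Pause,
                 * tx-only Asym_Pause alone.
                 */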
12122                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12123                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12124                                 advertising |= ADVERTISED_Pause;
12125                         } else {
12126                                 advertising |= ADVERTISED_Pause |
12127                                         ADVERTISED_Asym_Pause;
12128                         }
12129                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12130                         advertising |= ADVERTISED_Asym_Pause;
12131                 }
12132         }
12133         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12134                                                 advertising);
12135
12136         if (netif_running(dev) && tp->link_up) {
12137                 cmd->base.speed = tp->link_config.active_speed;
12138                 cmd->base.duplex = tp->link_config.active_duplex;
12139                 ethtool_convert_legacy_u32_to_link_mode(
12140                         cmd->link_modes.lp_advertising,
12141                         tp->link_config.rmt_adv);
12142
12143                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12144                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12145                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12146                         else
12147                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12148                 }
12149         } else {
12150                 cmd->base.speed = SPEED_UNKNOWN;
12151                 cmd->base.duplex = DUPLEX_UNKNOWN;
12152                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12153         }
12154         cmd->base.phy_address = tp->phy_addr;
12155         cmd->base.autoneg = tp->link_config.autoneg;
12156         return 0;
12157 }
12158
12159 static int tg3_set_link_ksettings(struct net_device *dev,
12160                                   const struct ethtool_link_ksettings *cmd)
12161 {
12162         struct tg3 *tp = netdev_priv(dev);
12163         u32 speed = cmd->base.speed;
12164         u32 advertising;
12165
12166         if (tg3_flag(tp, USE_PHYLIB)) {
12167                 struct phy_device *phydev;
12168                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12169                         return -EAGAIN;
12170                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12171                 return phy_ethtool_ksettings_set(phydev, cmd);
12172         }
12173
12174         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12175             cmd->base.autoneg != AUTONEG_DISABLE)
12176                 return -EINVAL;
12177
12178         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12179             cmd->base.duplex != DUPLEX_FULL &&
12180             cmd->base.duplex != DUPLEX_HALF)
12181                 return -EINVAL;
12182
12183         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12184                                                 cmd->link_modes.advertising);
12185
12186         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12187                 u32 mask = ADVERTISED_Autoneg |
12188                            ADVERTISED_Pause |
12189                            ADVERTISED_Asym_Pause;
12190
12191                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12192                         mask |= ADVERTISED_1000baseT_Half |
12193                                 ADVERTISED_1000baseT_Full;
12194
12195                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12196                         mask |= ADVERTISED_100baseT_Half |
12197                                 ADVERTISED_100baseT_Full |
12198                                 ADVERTISED_10baseT_Half |
12199                                 ADVERTISED_10baseT_Full |
12200                                 ADVERTISED_TP;
12201                 else
12202                         mask |= ADVERTISED_FIBRE;
12203
12204                 if (advertising & ~mask)
12205                         return -EINVAL;
12206
12207                 mask &= (ADVERTISED_1000baseT_Half |
12208                          ADVERTISED_1000baseT_Full |
12209                          ADVERTISED_100baseT_Half |
12210                          ADVERTISED_100baseT_Full |
12211                          ADVERTISED_10baseT_Half |
12212                          ADVERTISED_10baseT_Full);
12213
12214                 advertising &= mask;
12215         } else {
12216                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12217                         if (speed != SPEED_1000)
12218                                 return -EINVAL;
12219
12220                         if (cmd->base.duplex != DUPLEX_FULL)
12221                                 return -EINVAL;
12222                 } else {
12223                         if (speed != SPEED_100 &&
12224                             speed != SPEED_10)
12225                                 return -EINVAL;
12226                 }
12227         }
12228
12229         tg3_full_lock(tp, 0);
12230
12231         tp->link_config.autoneg = cmd->base.autoneg;
12232         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12233                 tp->link_config.advertising = (advertising |
12234                                               ADVERTISED_Autoneg);
12235                 tp->link_config.speed = SPEED_UNKNOWN;
12236                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12237         } else {
12238                 tp->link_config.advertising = 0;
12239                 tp->link_config.speed = speed;
12240                 tp->link_config.duplex = cmd->base.duplex;
12241         }
12242
12243         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12244
12245         tg3_warn_mgmt_link_flap(tp);
12246
12247         if (netif_running(dev))
12248                 tg3_setup_phy(tp, true);
12249
12250         tg3_full_unlock(tp);
12251
12252         return 0;
12253 }
12254
12255 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12256 {
12257         struct tg3 *tp = netdev_priv(dev);
12258
12259         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12260         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12261         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12262         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12263 }
12264
12265 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12266 {
12267         struct tg3 *tp = netdev_priv(dev);
12268
12269         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12270                 wol->supported = WAKE_MAGIC;
12271         else
12272                 wol->supported = 0;
12273         wol->wolopts = 0;
12274         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12275                 wol->wolopts = WAKE_MAGIC;
12276         memset(&wol->sopass, 0, sizeof(wol->sopass));
12277 }
12278
12279 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12280 {
12281         struct tg3 *tp = netdev_priv(dev);
12282         struct device *dp = &tp->pdev->dev;
12283
12284         if (wol->wolopts & ~WAKE_MAGIC)
12285                 return -EINVAL;
12286         if ((wol->wolopts & WAKE_MAGIC) &&
12287             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12288                 return -EINVAL;
12289
12290         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12291
12292         if (device_may_wakeup(dp))
12293                 tg3_flag_set(tp, WOL_ENABLE);
12294         else
12295                 tg3_flag_clear(tp, WOL_ENABLE);
12296
12297         return 0;
12298 }
12299
12300 static u32 tg3_get_msglevel(struct net_device *dev)
12301 {
12302         struct tg3 *tp = netdev_priv(dev);
12303         return tp->msg_enable;
12304 }
12305
12306 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12307 {
12308         struct tg3 *tp = netdev_priv(dev);
12309         tp->msg_enable = value;
12310 }
12311
12312 static int tg3_nway_reset(struct net_device *dev)
12313 {
12314         struct tg3 *tp = netdev_priv(dev);
12315         int r;
12316
12317         if (!netif_running(dev))
12318                 return -EAGAIN;
12319
12320         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12321                 return -EINVAL;
12322
12323         tg3_warn_mgmt_link_flap(tp);
12324
12325         if (tg3_flag(tp, USE_PHYLIB)) {
12326                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12327                         return -EAGAIN;
12328                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12329         } else {
12330                 u32 bmcr;
12331
12332                 spin_lock_bh(&tp->lock);
12333                 r = -EINVAL;
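                /* Note: BMCR is read back-to-back twice; only the value
                 * from the second, error-checked read is used.
                 */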
12334                 tg3_readphy(tp, MII_BMCR, &bmcr);
12335                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12336                     ((bmcr & BMCR_ANENABLE) ||
12337                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12338                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12339                                                    BMCR_ANENABLE);
12340                         r = 0;
12341                 }
12342                 spin_unlock_bh(&tp->lock);
12343         }
12344
12345         return r;
12346 }
12347
12348 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12349 {
12350         struct tg3 *tp = netdev_priv(dev);
12351
12352         ering->rx_max_pending = tp->rx_std_ring_mask;
12353         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12354                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12355         else
12356                 ering->rx_jumbo_max_pending = 0;
12357
12358         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12359
12360         ering->rx_pending = tp->rx_pending;
12361         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12362                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12363         else
12364                 ering->rx_jumbo_pending = 0;
12365
12366         ering->tx_pending = tp->napi[0].tx_pending;
12367 }
12368
12369 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12370 {
12371         struct tg3 *tp = netdev_priv(dev);
12372         int i, irq_sync = 0, err = 0;
12373
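        /* The tx ring must always be able to hold at least one maximally
         * fragmented skb; chips with the TSO bug may need to resegment a
         * packet in software, so demand extra headroom there.
         */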
12374         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12375             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12376             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12377             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12378             (tg3_flag(tp, TSO_BUG) &&
12379              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12380                 return -EINVAL;
12381
12382         if (netif_running(dev)) {
12383                 tg3_phy_stop(tp);
12384                 tg3_netif_stop(tp);
12385                 irq_sync = 1;
12386         }
12387
12388         tg3_full_lock(tp, irq_sync);
12389
12390         tp->rx_pending = ering->rx_pending;
12391
12392         if (tg3_flag(tp, MAX_RXPEND_64) &&
12393             tp->rx_pending > 63)
12394                 tp->rx_pending = 63;
12395
12396         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12397                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12398
12399         for (i = 0; i < tp->irq_max; i++)
12400                 tp->napi[i].tx_pending = ering->tx_pending;
12401
12402         if (netif_running(dev)) {
12403                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12404                 err = tg3_restart_hw(tp, false);
12405                 if (!err)
12406                         tg3_netif_start(tp);
12407         }
12408
12409         tg3_full_unlock(tp);
12410
12411         if (irq_sync && !err)
12412                 tg3_phy_start(tp);
12413
12414         return err;
12415 }
12416
12417 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12418 {
12419         struct tg3 *tp = netdev_priv(dev);
12420
12421         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12422
12423         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12424                 epause->rx_pause = 1;
12425         else
12426                 epause->rx_pause = 0;
12427
12428         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12429                 epause->tx_pause = 1;
12430         else
12431                 epause->tx_pause = 0;
12432 }
12433
12434 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12435 {
12436         struct tg3 *tp = netdev_priv(dev);
12437         int err = 0;
12438
12439         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12440                 tg3_warn_mgmt_link_flap(tp);
12441
12442         if (tg3_flag(tp, USE_PHYLIB)) {
12443                 u32 newadv;
12444                 struct phy_device *phydev;
12445
12446                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12447
12448                 if (!(phydev->supported & SUPPORTED_Pause) ||
12449                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12450                      (epause->rx_pause != epause->tx_pause)))
12451                         return -EINVAL;
12452
12453                 tp->link_config.flowctrl = 0;
12454                 if (epause->rx_pause) {
12455                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12456
12457                         if (epause->tx_pause) {
12458                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12459                                 newadv = ADVERTISED_Pause;
12460                         } else
12461                                 newadv = ADVERTISED_Pause |
12462                                          ADVERTISED_Asym_Pause;
12463                 } else if (epause->tx_pause) {
12464                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12465                         newadv = ADVERTISED_Asym_Pause;
12466                 } else
12467                         newadv = 0;
12468
12469                 if (epause->autoneg)
12470                         tg3_flag_set(tp, PAUSE_AUTONEG);
12471                 else
12472                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12473
12474                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12475                         u32 oldadv = phydev->advertising &
12476                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12477                         if (oldadv != newadv) {
12478                                 phydev->advertising &=
12479                                         ~(ADVERTISED_Pause |
12480                                           ADVERTISED_Asym_Pause);
12481                                 phydev->advertising |= newadv;
12482                                 if (phydev->autoneg) {
12483                                         /*
12484                                          * Always renegotiate the link to
12485                                          * inform our link partner of our
12486                                          * flow control settings, even if the
12487                                          * flow control is forced.  Let
12488                                          * tg3_adjust_link() do the final
12489                                          * flow control setup.
12490                                          */
12491                                         return phy_start_aneg(phydev);
12492                                 }
12493                         }
12494
12495                         if (!epause->autoneg)
12496                                 tg3_setup_flow_control(tp, 0, 0);
12497                 } else {
12498                         tp->link_config.advertising &=
12499                                         ~(ADVERTISED_Pause |
12500                                           ADVERTISED_Asym_Pause);
12501                         tp->link_config.advertising |= newadv;
12502                 }
12503         } else {
12504                 int irq_sync = 0;
12505
12506                 if (netif_running(dev)) {
12507                         tg3_netif_stop(tp);
12508                         irq_sync = 1;
12509                 }
12510
12511                 tg3_full_lock(tp, irq_sync);
12512
12513                 if (epause->autoneg)
12514                         tg3_flag_set(tp, PAUSE_AUTONEG);
12515                 else
12516                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12517                 if (epause->rx_pause)
12518                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12519                 else
12520                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12521                 if (epause->tx_pause)
12522                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12523                 else
12524                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12525
12526                 if (netif_running(dev)) {
12527                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12528                         err = tg3_restart_hw(tp, false);
12529                         if (!err)
12530                                 tg3_netif_start(tp);
12531                 }
12532
12533                 tg3_full_unlock(tp);
12534         }
12535
12536         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12537
12538         return err;
12539 }
12540
12541 static int tg3_get_sset_count(struct net_device *dev, int sset)
12542 {
12543         switch (sset) {
12544         case ETH_SS_TEST:
12545                 return TG3_NUM_TEST;
12546         case ETH_SS_STATS:
12547                 return TG3_NUM_STATS;
12548         default:
12549                 return -EOPNOTSUPP;
12550         }
12551 }
12552
12553 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12554                          u32 *rules __always_unused)
12555 {
12556         struct tg3 *tp = netdev_priv(dev);
12557
12558         if (!tg3_flag(tp, SUPPORT_MSIX))
12559                 return -EOPNOTSUPP;
12560
12561         switch (info->cmd) {
12562         case ETHTOOL_GRXRINGS:
12563                 if (netif_running(tp->dev)) {
12564                         info->data = tp->rxq_cnt;
12565                 } else {
12566                         info->data = num_online_cpus();
12567                         if (info->data > TG3_RSS_MAX_NUM_QS)
12568                                 info->data = TG3_RSS_MAX_NUM_QS;
12569                 }
12570
12571                 return 0;
12572
12573         default:
12574                 return -EOPNOTSUPP;
12575         }
12576 }
12577
12578 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12579 {
12580         u32 size = 0;
12581         struct tg3 *tp = netdev_priv(dev);
12582
12583         if (tg3_flag(tp, SUPPORT_MSIX))
12584                 size = TG3_RSS_INDIR_TBL_SIZE;
12585
12586         return size;
12587 }
12588
12589 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12590 {
12591         struct tg3 *tp = netdev_priv(dev);
12592         int i;
12593
12594         if (hfunc)
12595                 *hfunc = ETH_RSS_HASH_TOP;
12596         if (!indir)
12597                 return 0;
12598
12599         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12600                 indir[i] = tp->rss_ind_tbl[i];
12601
12602         return 0;
12603 }
12604
12605 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12606                         const u8 hfunc)
12607 {
12608         struct tg3 *tp = netdev_priv(dev);
12609         size_t i;
12610
12611         /* We require at least one supported parameter to be changed and no
12612          * change in any of the unsupported parameters
12613          */
12614         if (key ||
12615             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12616                 return -EOPNOTSUPP;
12617
12618         if (!indir)
12619                 return 0;
12620
12621         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12622                 tp->rss_ind_tbl[i] = indir[i];
12623
12624         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12625                 return 0;
12626
12627         /* It is legal to write the indirection
12628          * table while the device is running.
12629          */
12630         tg3_full_lock(tp, 0);
12631         tg3_rss_write_indir_tbl(tp);
12632         tg3_full_unlock(tp);
12633
12634         return 0;
12635 }
12636
12637 static void tg3_get_channels(struct net_device *dev,
12638                              struct ethtool_channels *channel)
12639 {
12640         struct tg3 *tp = netdev_priv(dev);
12641         u32 deflt_qs = netif_get_num_default_rss_queues();
12642
12643         channel->max_rx = tp->rxq_max;
12644         channel->max_tx = tp->txq_max;
12645
12646         if (netif_running(dev)) {
12647                 channel->rx_count = tp->rxq_cnt;
12648                 channel->tx_count = tp->txq_cnt;
12649         } else {
12650                 if (tp->rxq_req)
12651                         channel->rx_count = tp->rxq_req;
12652                 else
12653                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12654
12655                 if (tp->txq_req)
12656                         channel->tx_count = tp->txq_req;
12657                 else
12658                         channel->tx_count = min(deflt_qs, tp->txq_max);
12659         }
12660 }
12661
12662 static int tg3_set_channels(struct net_device *dev,
12663                             struct ethtool_channels *channel)
12664 {
12665         struct tg3 *tp = netdev_priv(dev);
12666
12667         if (!tg3_flag(tp, SUPPORT_MSIX))
12668                 return -EOPNOTSUPP;
12669
12670         if (channel->rx_count > tp->rxq_max ||
12671             channel->tx_count > tp->txq_max)
12672                 return -EINVAL;
12673
12674         tp->rxq_req = channel->rx_count;
12675         tp->txq_req = channel->tx_count;
12676
12677         if (!netif_running(dev))
12678                 return 0;
12679
12680         tg3_stop(tp);
12681
12682         tg3_carrier_off(tp);
12683
12684         tg3_start(tp, true, false, false);
12685
12686         return 0;
12687 }
12688
12689 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12690 {
12691         switch (stringset) {
12692         case ETH_SS_STATS:
12693                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12694                 break;
12695         case ETH_SS_TEST:
12696                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12697                 break;
12698         default:
12699                 WARN_ON(1);     /* unknown string set */
12700                 break;
12701         }
12702 }
12703
12704 static int tg3_set_phys_id(struct net_device *dev,
12705                             enum ethtool_phys_id_state state)
12706 {
12707         struct tg3 *tp = netdev_priv(dev);
12708
12709         if (!netif_running(tp->dev))
12710                 return -EAGAIN;
12711
12712         switch (state) {
12713         case ETHTOOL_ID_ACTIVE:
12714                 return 1;       /* cycle on/off once per second */
12715
12716         case ETHTOOL_ID_ON:
12717                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12718                      LED_CTRL_1000MBPS_ON |
12719                      LED_CTRL_100MBPS_ON |
12720                      LED_CTRL_10MBPS_ON |
12721                      LED_CTRL_TRAFFIC_OVERRIDE |
12722                      LED_CTRL_TRAFFIC_BLINK |
12723                      LED_CTRL_TRAFFIC_LED);
12724                 break;
12725
12726         case ETHTOOL_ID_OFF:
12727                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12728                      LED_CTRL_TRAFFIC_OVERRIDE);
12729                 break;
12730
12731         case ETHTOOL_ID_INACTIVE:
12732                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12733                 break;
12734         }
12735
12736         return 0;
12737 }
12738
12739 static void tg3_get_ethtool_stats(struct net_device *dev,
12740                                    struct ethtool_stats *estats, u64 *tmp_stats)
12741 {
12742         struct tg3 *tp = netdev_priv(dev);
12743
12744         if (tp->hw_stats)
12745                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12746         else
12747                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12748 }
12749
12750 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12751 {
12752         int i;
12753         __be32 *buf;
12754         u32 offset = 0, len = 0;
12755         u32 magic, val;
12756
12757         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12758                 return NULL;
12759
12760         if (magic == TG3_EEPROM_MAGIC) {
12761                 for (offset = TG3_NVM_DIR_START;
12762                      offset < TG3_NVM_DIR_END;
12763                      offset += TG3_NVM_DIRENT_SIZE) {
12764                         if (tg3_nvram_read(tp, offset, &val))
12765                                 return NULL;
12766
12767                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12768                             TG3_NVM_DIRTYPE_EXTVPD)
12769                                 break;
12770                 }
12771
12772                 if (offset != TG3_NVM_DIR_END) {
12773                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12774                         if (tg3_nvram_read(tp, offset + 4, &offset))
12775                                 return NULL;
12776
12777                         offset = tg3_nvram_logical_addr(tp, offset);
12778                 }
12779         }
12780
12781         if (!offset || !len) {
12782                 offset = TG3_NVM_VPD_OFF;
12783                 len = TG3_NVM_VPD_LEN;
12784         }
12785
12786         buf = kmalloc(len, GFP_KERNEL);
12787         if (buf == NULL)
12788                 return NULL;
12789
12790         if (magic == TG3_EEPROM_MAGIC) {
12791                 for (i = 0; i < len; i += 4) {
12792                         /* The data is in little-endian format in NVRAM.
12793                          * Use the big-endian read routines to preserve
12794                          * the byte order as it exists in NVRAM.
12795                          */
12796                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12797                                 goto error;
12798                 }
12799         } else {
12800                 u8 *ptr;
12801                 ssize_t cnt;
12802                 unsigned int pos = 0;
12803
12804                 ptr = (u8 *)&buf[0];
12805                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12806                         cnt = pci_read_vpd(tp->pdev, pos,
12807                                            len - pos, ptr);
12808                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12809                                 cnt = 0;
12810                         else if (cnt < 0)
12811                                 goto error;
12812                 }
12813                 if (pos != len)
12814                         goto error;
12815         }
12816
12817         *vpdlen = len;
12818
12819         return buf;
12820
12821 error:
12822         kfree(buf);
12823         return NULL;
12824 }
12825
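/* Editor's note (illustrative): tg3_nvram_read_be32() is used above so
 * that the byte order seen in NVRAM survives into the kmalloc'ed buffer.
 * As a worked example, if four consecutive NVRAM bytes are
 * 0x11 0x22 0x33 0x44, a big-endian read stores the 32-bit value such
 * that the buffer bytes remain 0x11 0x22 0x33 0x44 in memory, which is
 * what the VPD parsing helpers expect when they walk the data as u8.
 */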
12826 #define NVRAM_TEST_SIZE 0x100
12827 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12828 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12829 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12830 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12831 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12832 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12833 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12834 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12835
12836 static int tg3_test_nvram(struct tg3 *tp)
12837 {
12838         u32 csum, magic, len;
12839         __be32 *buf;
12840         int i, j, k, err = 0, size;
12841
12842         if (tg3_flag(tp, NO_NVRAM))
12843                 return 0;
12844
12845         if (tg3_nvram_read(tp, 0, &magic) != 0)
12846                 return -EIO;
12847
12848         if (magic == TG3_EEPROM_MAGIC)
12849                 size = NVRAM_TEST_SIZE;
12850         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12851                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12852                     TG3_EEPROM_SB_FORMAT_1) {
12853                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12854                         case TG3_EEPROM_SB_REVISION_0:
12855                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12856                                 break;
12857                         case TG3_EEPROM_SB_REVISION_2:
12858                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12859                                 break;
12860                         case TG3_EEPROM_SB_REVISION_3:
12861                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12862                                 break;
12863                         case TG3_EEPROM_SB_REVISION_4:
12864                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12865                                 break;
12866                         case TG3_EEPROM_SB_REVISION_5:
12867                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12868                                 break;
12869                         case TG3_EEPROM_SB_REVISION_6:
12870                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12871                                 break;
12872                         default:
12873                                 return -EIO;
12874                         }
12875                 } else
12876                         return 0;
12877         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12878                 size = NVRAM_SELFBOOT_HW_SIZE;
12879         else
12880                 return -EIO;
12881
12882         buf = kmalloc(size, GFP_KERNEL);
12883         if (buf == NULL)
12884                 return -ENOMEM;
12885
12886         err = -EIO;
12887         for (i = 0, j = 0; i < size; i += 4, j++) {
12888                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12889                 if (err)
12890                         break;
12891         }
12892         if (i < size)
12893                 goto out;
12894
12895         /* Selfboot format */
12896         magic = be32_to_cpu(buf[0]);
12897         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12898             TG3_EEPROM_MAGIC_FW) {
12899                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12900
12901                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12902                     TG3_EEPROM_SB_REVISION_2) {
12903                         /* For rev 2, the csum doesn't include the MBA (Multi-Boot Agent) word. */
12904                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12905                                 csum8 += buf8[i];
12906                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12907                                 csum8 += buf8[i];
12908                 } else {
12909                         for (i = 0; i < size; i++)
12910                                 csum8 += buf8[i];
12911                 }
12912
12913                 if (csum8 == 0) {
12914                         err = 0;
12915                         goto out;
12916                 }
12917
12918                 err = -EIO;
12919                 goto out;
12920         }
12921
12922         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12923             TG3_EEPROM_MAGIC_HW) {
12924                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12925                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12926                 u8 *buf8 = (u8 *) buf;
12927
12928                 /* Separate the parity bits and the data bytes: bytes 0, 8, 16 and 17 carry one parity bit per data byte (7 + 7 + 6 + 8 = 28 bits); the remaining 28 bytes are data. */
12929                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12930                         if ((i == 0) || (i == 8)) {
12931                                 int l;
12932                                 u8 msk;
12933
12934                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12935                                         parity[k++] = buf8[i] & msk;
12936                                 i++;
12937                         } else if (i == 16) {
12938                                 int l;
12939                                 u8 msk;
12940
12941                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12942                                         parity[k++] = buf8[i] & msk;
12943                                 i++;
12944
12945                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12946                                         parity[k++] = buf8[i] & msk;
12947                                 i++;
12948                         }
12949                         data[j++] = buf8[i];
12950                 }
12951
12952                 err = -EIO;
12953                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12954                         u8 hw8 = hweight8(data[i]);
12955
12956                         if ((hw8 & 0x1) && parity[i])
12957                                 goto out;
12958                         else if (!(hw8 & 0x1) && !parity[i])
12959                                 goto out;
12960                 }
12961                 err = 0;
12962                 goto out;
12963         }
12964
12965         err = -EIO;
12966
12967         /* Bootstrap checksum at offset 0x10 */
12968         csum = calc_crc((unsigned char *) buf, 0x10);
12969         if (csum != le32_to_cpu(buf[0x10/4]))
12970                 goto out;
12971
12972         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12973         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12974         if (csum != le32_to_cpu(buf[0xfc/4]))
12975                 goto out;
12976
12977         kfree(buf);
12978
12979         buf = tg3_vpd_readblock(tp, &len);
12980         if (!buf)
12981                 return -ENOMEM;
12982
12983         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12984         if (i > 0) {
12985                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12986                 if (j < 0)
12987                         goto out;
12988
12989                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12990                         goto out;
12991
12992                 i += PCI_VPD_LRDT_TAG_SIZE;
12993                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12994                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12995                 if (j > 0) {
12996                         u8 csum8 = 0;
12997
12998                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12999
13000                         for (i = 0; i <= j; i++)
13001                                 csum8 += ((u8 *)buf)[i];
13002
13003                         if (csum8)
13004                                 goto out;
13005                 }
13006         }
13007
13008         err = 0;
13009
13010 out:
13011         kfree(buf);
13012         return err;
13013 }
13014
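/* Editor's note: a minimal standalone sketch (not compiled into the
 * driver) of the two integrity checks tg3_test_nvram() applies to
 * selfboot images.  Format-1 images use a simple byte-wise sum that must
 * wrap to zero over the whole image; HW-format images use odd parity, so
 * a data byte with an even bit count must have its parity bit set and one
 * with an odd bit count must have it clear.
 */
#if 0
static u8 selfboot_byte_csum(const u8 *buf, int len)
{
        u8 csum = 0;
        int i;

        for (i = 0; i < len; i++)
                csum += buf[i];
        return csum;            /* a valid image sums to 0 */
}

static bool selfboot_parity_ok(u8 data, u8 parity_bit)
{
        /* hweight8() counts set bits; the total weight including the
         * parity bit must come out odd.
         */
        return (hweight8(data) & 0x1) ? !parity_bit : !!parity_bit;
}
#endif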
13015 #define TG3_SERDES_TIMEOUT_SEC  2
13016 #define TG3_COPPER_TIMEOUT_SEC  6
13017
13018 static int tg3_test_link(struct tg3 *tp)
13019 {
13020         int i, max;
13021
13022         if (!netif_running(tp->dev))
13023                 return -ENODEV;
13024
13025         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13026                 max = TG3_SERDES_TIMEOUT_SEC;
13027         else
13028                 max = TG3_COPPER_TIMEOUT_SEC;
13029
13030         for (i = 0; i < max; i++) {
13031                 if (tp->link_up)
13032                         return 0;
13033
13034                 if (msleep_interruptible(1000))
13035                         break;
13036         }
13037
13038         return -EIO;
13039 }
13040
13041 /* Only test the commonly used registers */
13042 static int tg3_test_registers(struct tg3 *tp)
13043 {
13044         int i, is_5705, is_5750;
13045         u32 offset, read_mask, write_mask, val, save_val, read_val;
13046         static struct {
13047                 u16 offset;
13048                 u16 flags;
13049 #define TG3_FL_5705     0x1
13050 #define TG3_FL_NOT_5705 0x2
13051 #define TG3_FL_NOT_5788 0x4
13052 #define TG3_FL_NOT_5750 0x8
13053                 u32 read_mask;
13054                 u32 write_mask;
13055         } reg_tbl[] = {
13056                 /* MAC Control Registers */
13057                 { MAC_MODE, TG3_FL_NOT_5705,
13058                         0x00000000, 0x00ef6f8c },
13059                 { MAC_MODE, TG3_FL_5705,
13060                         0x00000000, 0x01ef6b8c },
13061                 { MAC_STATUS, TG3_FL_NOT_5705,
13062                         0x03800107, 0x00000000 },
13063                 { MAC_STATUS, TG3_FL_5705,
13064                         0x03800100, 0x00000000 },
13065                 { MAC_ADDR_0_HIGH, 0x0000,
13066                         0x00000000, 0x0000ffff },
13067                 { MAC_ADDR_0_LOW, 0x0000,
13068                         0x00000000, 0xffffffff },
13069                 { MAC_RX_MTU_SIZE, 0x0000,
13070                         0x00000000, 0x0000ffff },
13071                 { MAC_TX_MODE, 0x0000,
13072                         0x00000000, 0x00000070 },
13073                 { MAC_TX_LENGTHS, 0x0000,
13074                         0x00000000, 0x00003fff },
13075                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13076                         0x00000000, 0x000007fc },
13077                 { MAC_RX_MODE, TG3_FL_5705,
13078                         0x00000000, 0x000007dc },
13079                 { MAC_HASH_REG_0, 0x0000,
13080                         0x00000000, 0xffffffff },
13081                 { MAC_HASH_REG_1, 0x0000,
13082                         0x00000000, 0xffffffff },
13083                 { MAC_HASH_REG_2, 0x0000,
13084                         0x00000000, 0xffffffff },
13085                 { MAC_HASH_REG_3, 0x0000,
13086                         0x00000000, 0xffffffff },
13087
13088                 /* Receive Data and Receive BD Initiator Control Registers. */
13089                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13090                         0x00000000, 0xffffffff },
13091                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13092                         0x00000000, 0xffffffff },
13093                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13094                         0x00000000, 0x00000003 },
13095                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13096                         0x00000000, 0xffffffff },
13097                 { RCVDBDI_STD_BD+0, 0x0000,
13098                         0x00000000, 0xffffffff },
13099                 { RCVDBDI_STD_BD+4, 0x0000,
13100                         0x00000000, 0xffffffff },
13101                 { RCVDBDI_STD_BD+8, 0x0000,
13102                         0x00000000, 0xffff0002 },
13103                 { RCVDBDI_STD_BD+0xc, 0x0000,
13104                         0x00000000, 0xffffffff },
13105
13106                 /* Receive BD Initiator Control Registers. */
13107                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13108                         0x00000000, 0xffffffff },
13109                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13110                         0x00000000, 0x000003ff },
13111                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13112                         0x00000000, 0xffffffff },
13113
13114                 /* Host Coalescing Control Registers. */
13115                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13116                         0x00000000, 0x00000004 },
13117                 { HOSTCC_MODE, TG3_FL_5705,
13118                         0x00000000, 0x000000f6 },
13119                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13120                         0x00000000, 0xffffffff },
13121                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13122                         0x00000000, 0x000003ff },
13123                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13124                         0x00000000, 0xffffffff },
13125                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13126                         0x00000000, 0x000003ff },
13127                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13128                         0x00000000, 0xffffffff },
13129                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13130                         0x00000000, 0x000000ff },
13131                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13132                         0x00000000, 0xffffffff },
13133                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13134                         0x00000000, 0x000000ff },
13135                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13136                         0x00000000, 0xffffffff },
13137                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13138                         0x00000000, 0xffffffff },
13139                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13140                         0x00000000, 0xffffffff },
13141                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13142                         0x00000000, 0x000000ff },
13143                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13144                         0x00000000, 0xffffffff },
13145                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13146                         0x00000000, 0x000000ff },
13147                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13148                         0x00000000, 0xffffffff },
13149                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13150                         0x00000000, 0xffffffff },
13151                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13152                         0x00000000, 0xffffffff },
13153                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13154                         0x00000000, 0xffffffff },
13155                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13156                         0x00000000, 0xffffffff },
13157                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13158                         0xffffffff, 0x00000000 },
13159                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13160                         0xffffffff, 0x00000000 },
13161
13162                 /* Buffer Manager Control Registers. */
13163                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13164                         0x00000000, 0x007fff80 },
13165                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13166                         0x00000000, 0x007fffff },
13167                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13168                         0x00000000, 0x0000003f },
13169                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13170                         0x00000000, 0x000001ff },
13171                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13172                         0x00000000, 0x000001ff },
13173                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13174                         0xffffffff, 0x00000000 },
13175                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13176                         0xffffffff, 0x00000000 },
13177
13178                 /* Mailbox Registers */
13179                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13180                         0x00000000, 0x000001ff },
13181                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13182                         0x00000000, 0x000001ff },
13183                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13184                         0x00000000, 0x000007ff },
13185                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13186                         0x00000000, 0x000001ff },
13187
13188                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13189         };
13190
13191         is_5705 = is_5750 = 0;
13192         if (tg3_flag(tp, 5705_PLUS)) {
13193                 is_5705 = 1;
13194                 if (tg3_flag(tp, 5750_PLUS))
13195                         is_5750 = 1;
13196         }
13197
13198         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13199                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13200                         continue;
13201
13202                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13203                         continue;
13204
13205                 if (tg3_flag(tp, IS_5788) &&
13206                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13207                         continue;
13208
13209                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13210                         continue;
13211
13212                 offset = (u32) reg_tbl[i].offset;
13213                 read_mask = reg_tbl[i].read_mask;
13214                 write_mask = reg_tbl[i].write_mask;
13215
13216                 /* Save the original register content */
13217                 save_val = tr32(offset);
13218
13219                 /* Determine the read-only value. */
13220                 read_val = save_val & read_mask;
13221
13222                 /* Write zero to the register, then make sure the read-only bits
13223                  * are not changed and the read/write bits are all zeros.
13224                  */
13225                 tw32(offset, 0);
13226
13227                 val = tr32(offset);
13228
13229                 /* Test the read-only and read/write bits. */
13230                 if (((val & read_mask) != read_val) || (val & write_mask))
13231                         goto out;
13232
13233                 /* Write ones to all the bits defined by RdMask and WrMask, then
13234                  * make sure the read-only bits are not changed and the
13235                  * read/write bits are all ones.
13236                  */
13237                 tw32(offset, read_mask | write_mask);
13238
13239                 val = tr32(offset);
13240
13241                 /* Test the read-only bits. */
13242                 if ((val & read_mask) != read_val)
13243                         goto out;
13244
13245                 /* Test the read/write bits. */
13246                 if ((val & write_mask) != write_mask)
13247                         goto out;
13248
13249                 tw32(offset, save_val);
13250         }
13251
13252         return 0;
13253
13254 out:
13255         if (netif_msg_hw(tp))
13256                 netdev_err(tp->dev,
13257                            "Register test failed at offset %x\n", offset);
13258         tw32(offset, save_val);
13259         return -EIO;
13260 }
13261
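/* Editor's note: a condensed sketch (not compiled) of the mask-based
 * probe tg3_test_registers() applies to each table entry.  read_mask
 * selects bits that must keep their saved value no matter what is
 * written; write_mask selects bits that must follow the written value
 * exactly.
 */
#if 0
static int tg3_probe_reg(struct tg3 *tp, u32 off, u32 rd_mask, u32 wr_mask)
{
        u32 save = tr32(off), ro = save & rd_mask, val;
        int err = -EIO;

        tw32(off, 0);                   /* all-zeros pass */
        val = tr32(off);
        if ((val & rd_mask) != ro || (val & wr_mask))
                goto restore;

        tw32(off, rd_mask | wr_mask);   /* all-ones pass */
        val = tr32(off);
        if ((val & rd_mask) != ro || (val & wr_mask) != wr_mask)
                goto restore;

        err = 0;
restore:
        tw32(off, save);
        return err;
}
#endif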
13262 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13263 {
13264         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13265         int i;
13266         u32 j;
13267
13268         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13269                 for (j = 0; j < len; j += 4) {
13270                         u32 val;
13271
13272                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13273                         tg3_read_mem(tp, offset + j, &val);
13274                         if (val != test_pattern[i])
13275                                 return -EIO;
13276                 }
13277         }
13278         return 0;
13279 }
13280
13281 static int tg3_test_memory(struct tg3 *tp)
13282 {
13283         static struct mem_entry {
13284                 u32 offset;
13285                 u32 len;
13286         } mem_tbl_570x[] = {
13287                 { 0x00000000, 0x00b50},
13288                 { 0x00002000, 0x1c000},
13289                 { 0xffffffff, 0x00000}
13290         }, mem_tbl_5705[] = {
13291                 { 0x00000100, 0x0000c},
13292                 { 0x00000200, 0x00008},
13293                 { 0x00004000, 0x00800},
13294                 { 0x00006000, 0x01000},
13295                 { 0x00008000, 0x02000},
13296                 { 0x00010000, 0x0e000},
13297                 { 0xffffffff, 0x00000}
13298         }, mem_tbl_5755[] = {
13299                 { 0x00000200, 0x00008},
13300                 { 0x00004000, 0x00800},
13301                 { 0x00006000, 0x00800},
13302                 { 0x00008000, 0x02000},
13303                 { 0x00010000, 0x0c000},
13304                 { 0xffffffff, 0x00000}
13305         }, mem_tbl_5906[] = {
13306                 { 0x00000200, 0x00008},
13307                 { 0x00004000, 0x00400},
13308                 { 0x00006000, 0x00400},
13309                 { 0x00008000, 0x01000},
13310                 { 0x00010000, 0x01000},
13311                 { 0xffffffff, 0x00000}
13312         }, mem_tbl_5717[] = {
13313                 { 0x00000200, 0x00008},
13314                 { 0x00010000, 0x0a000},
13315                 { 0x00020000, 0x13c00},
13316                 { 0xffffffff, 0x00000}
13317         }, mem_tbl_57765[] = {
13318                 { 0x00000200, 0x00008},
13319                 { 0x00004000, 0x00800},
13320                 { 0x00006000, 0x09800},
13321                 { 0x00010000, 0x0a000},
13322                 { 0xffffffff, 0x00000}
13323         };
13324         struct mem_entry *mem_tbl;
13325         int err = 0;
13326         int i;
13327
13328         if (tg3_flag(tp, 5717_PLUS))
13329                 mem_tbl = mem_tbl_5717;
13330         else if (tg3_flag(tp, 57765_CLASS) ||
13331                  tg3_asic_rev(tp) == ASIC_REV_5762)
13332                 mem_tbl = mem_tbl_57765;
13333         else if (tg3_flag(tp, 5755_PLUS))
13334                 mem_tbl = mem_tbl_5755;
13335         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13336                 mem_tbl = mem_tbl_5906;
13337         else if (tg3_flag(tp, 5705_PLUS))
13338                 mem_tbl = mem_tbl_5705;
13339         else
13340                 mem_tbl = mem_tbl_570x;
13341
13342         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13343                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13344                 if (err)
13345                         break;
13346         }
13347
13348         return err;
13349 }
13350
13351 #define TG3_TSO_MSS             500
13352
13353 #define TG3_TSO_IP_HDR_LEN      20
13354 #define TG3_TSO_TCP_HDR_LEN     20
13355 #define TG3_TSO_TCP_OPT_LEN     12
13356
13357 static const u8 tg3_tso_header[] = {
13358 0x08, 0x00,
13359 0x45, 0x00, 0x00, 0x00,
13360 0x00, 0x00, 0x40, 0x00,
13361 0x40, 0x06, 0x00, 0x00,
13362 0x0a, 0x00, 0x00, 0x01,
13363 0x0a, 0x00, 0x00, 0x02,
13364 0x0d, 0x00, 0xe0, 0x00,
13365 0x00, 0x00, 0x01, 0x00,
13366 0x00, 0x00, 0x02, 0x00,
13367 0x80, 0x10, 0x10, 0x00,
13368 0x14, 0x09, 0x00, 0x00,
13369 0x01, 0x01, 0x08, 0x0a,
13370 0x11, 0x11, 0x11, 0x11,
13371 0x11, 0x11, 0x11, 0x11,
13372 };
13373
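/* Editor's note: the canned header above decodes as follows (offsets are
 * relative to the start of the array, which is copied in right after the
 * two 6-byte MAC addresses):
 *   bytes 0-1    EtherType 0x0800 (IPv4)
 *   bytes 2-21   IPv4 header: version 4, IHL 5 (TG3_TSO_IP_HDR_LEN = 20
 *                bytes), DF set, TTL 64, protocol 6 (TCP), tot_len
 *                patched at runtime, src 10.0.0.1, dst 10.0.0.2
 *   bytes 22-53  TCP header: data offset 8, i.e. 32 bytes =
 *                TG3_TSO_TCP_HDR_LEN + TG3_TSO_TCP_OPT_LEN; ACK set;
 *                options are NOP, NOP, timestamp (kind 8, len 10) with
 *                placeholder 0x11 timestamp bytes
 */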
13374 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13375 {
13376         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13377         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13378         u32 budget;
13379         struct sk_buff *skb;
13380         u8 *tx_data, *rx_data;
13381         dma_addr_t map;
13382         int num_pkts, tx_len, rx_len, i, err;
13383         struct tg3_rx_buffer_desc *desc;
13384         struct tg3_napi *tnapi, *rnapi;
13385         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13386
13387         tnapi = &tp->napi[0];
13388         rnapi = &tp->napi[0];
13389         if (tp->irq_cnt > 1) {
13390                 if (tg3_flag(tp, ENABLE_RSS))
13391                         rnapi = &tp->napi[1];
13392                 if (tg3_flag(tp, ENABLE_TSS))
13393                         tnapi = &tp->napi[1];
13394         }
13395         coal_now = tnapi->coal_now | rnapi->coal_now;
13396
13397         err = -EIO;
13398
13399         tx_len = pktsz;
13400         skb = netdev_alloc_skb(tp->dev, tx_len);
13401         if (!skb)
13402                 return -ENOMEM;
13403
13404         tx_data = skb_put(skb, tx_len);
13405         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13406         memset(tx_data + ETH_ALEN, 0x0, 8);
13407
13408         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13409
13410         if (tso_loopback) {
13411                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13412
13413                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13414                               TG3_TSO_TCP_OPT_LEN;
13415
13416                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13417                        sizeof(tg3_tso_header));
13418                 mss = TG3_TSO_MSS;
13419
13420                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13421                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13422
13423                 /* Set the total length field in the IP header */
13424                 iph->tot_len = htons((u16)(mss + hdr_len));
13425
13426                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13427                               TXD_FLAG_CPU_POST_DMA);
13428
13429                 if (tg3_flag(tp, HW_TSO_1) ||
13430                     tg3_flag(tp, HW_TSO_2) ||
13431                     tg3_flag(tp, HW_TSO_3)) {
13432                         struct tcphdr *th;
13433                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13434                         th = (struct tcphdr *)&tx_data[val];
13435                         th->check = 0;
13436                 } else
13437                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13438
13439                 if (tg3_flag(tp, HW_TSO_3)) {
13440                         mss |= (hdr_len & 0xc) << 12;
13441                         if (hdr_len & 0x10)
13442                                 base_flags |= 0x00000010;
13443                         base_flags |= (hdr_len & 0x3e0) << 5;
13444                 } else if (tg3_flag(tp, HW_TSO_2))
13445                         mss |= hdr_len << 9;
13446                 else if (tg3_flag(tp, HW_TSO_1) ||
13447                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13448                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13449                 } else {
13450                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13451                 }
13452
13453                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13454         } else {
13455                 num_pkts = 1;
13456                 data_off = ETH_HLEN;
13457
13458                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13459                     tx_len > VLAN_ETH_FRAME_LEN)
13460                         base_flags |= TXD_FLAG_JMB_PKT;
13461         }
13462
13463         for (i = data_off; i < tx_len; i++)
13464                 tx_data[i] = (u8) (i & 0xff);
13465
13466         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13467         if (pci_dma_mapping_error(tp->pdev, map)) {
13468                 dev_kfree_skb(skb);
13469                 return -EIO;
13470         }
13471
13472         val = tnapi->tx_prod;
13473         tnapi->tx_buffers[val].skb = skb;
13474         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13475
13476         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13477                rnapi->coal_now);
13478
13479         udelay(10);
13480
13481         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13482
13483         budget = tg3_tx_avail(tnapi);
13484         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13485                             base_flags | TXD_FLAG_END, mss, 0)) {
13486                 tnapi->tx_buffers[val].skb = NULL;
13487                 dev_kfree_skb(skb);
13488                 return -EIO;
13489         }
13490
13491         tnapi->tx_prod++;
13492
13493         /* Sync BD data before updating mailbox */
13494         wmb();
13495
13496         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13497         tr32_mailbox(tnapi->prodmbox);
13498
13499         udelay(10);
13500
13501         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13502         for (i = 0; i < 35; i++) {
13503                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13504                        coal_now);
13505
13506                 udelay(10);
13507
13508                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13509                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13510                 if ((tx_idx == tnapi->tx_prod) &&
13511                     (rx_idx == (rx_start_idx + num_pkts)))
13512                         break;
13513         }
13514
13515         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13516         dev_kfree_skb(skb);
13517
13518         if (tx_idx != tnapi->tx_prod)
13519                 goto out;
13520
13521         if (rx_idx != rx_start_idx + num_pkts)
13522                 goto out;
13523
13524         val = data_off;
13525         while (rx_idx != rx_start_idx) {
13526                 desc = &rnapi->rx_rcb[rx_start_idx++];
13527                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13528                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13529
13530                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13531                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13532                         goto out;
13533
13534                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13535                          - ETH_FCS_LEN;
13536
13537                 if (!tso_loopback) {
13538                         if (rx_len != tx_len)
13539                                 goto out;
13540
13541                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13542                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13543                                         goto out;
13544                         } else {
13545                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13546                                         goto out;
13547                         }
13548                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13549                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13550                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13551                         goto out;
13552                 }
13553
13554                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13555                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13556                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13557                                              mapping);
13558                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13559                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13560                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13561                                              mapping);
13562                 } else
13563                         goto out;
13564
13565                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13566                                             PCI_DMA_FROMDEVICE);
13567
13568                 rx_data += TG3_RX_OFFSET(tp);
13569                 for (i = data_off; i < rx_len; i++, val++) {
13570                         if (*(rx_data + i) != (u8) (val & 0xff))
13571                                 goto out;
13572                 }
13573         }
13574
13575         err = 0;
13576
13577         /* tg3_free_rings will unmap and free the rx_data */
13578 out:
13579         return err;
13580 }
13581
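/* Editor's note: a worked example of the TSO sizing above, assuming a
 * hypothetical pktsz of 2034 bytes.  The payload handed to the TSO
 * engine is tx_len - 2 * ETH_ALEN - sizeof(tg3_tso_header) =
 * 2034 - 12 - 54 = 1968 bytes, so with TG3_TSO_MSS = 500 the test
 * expects DIV_ROUND_UP(1968, 500) = 4 received segments.  Each segment
 * carries a contiguous slice of the (i & 0xff) fill pattern, which is
 * why the verification loop lets the counter 'val' run across segment
 * boundaries instead of resetting it per packet.
 */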
13582 #define TG3_STD_LOOPBACK_FAILED         1
13583 #define TG3_JMB_LOOPBACK_FAILED         2
13584 #define TG3_TSO_LOOPBACK_FAILED         4
13585 #define TG3_LOOPBACK_FAILED \
13586         (TG3_STD_LOOPBACK_FAILED | \
13587          TG3_JMB_LOOPBACK_FAILED | \
13588          TG3_TSO_LOOPBACK_FAILED)
13589
13590 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13591 {
13592         int err = -EIO;
13593         u32 eee_cap;
13594         u32 jmb_pkt_sz = 9000;
13595
13596         if (tp->dma_limit)
13597                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13598
13599         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13600         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13601
13602         if (!netif_running(tp->dev)) {
13603                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13604                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13605                 if (do_extlpbk)
13606                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13607                 goto done;
13608         }
13609
13610         err = tg3_reset_hw(tp, true);
13611         if (err) {
13612                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13613                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13614                 if (do_extlpbk)
13615                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13616                 goto done;
13617         }
13618
13619         if (tg3_flag(tp, ENABLE_RSS)) {
13620                 int i;
13621
13622                 /* Reroute all rx packets to the 1st queue */
13623                 for (i = MAC_RSS_INDIR_TBL_0;
13624                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13625                         tw32(i, 0x0);
13626         }
13627
13628         /* HW erratum - MAC loopback fails in some cases on 5780.
13629          * Normal traffic and PHY loopback are not affected by this
13630          * erratum.  Also, the MAC loopback test is deprecated for
13631          * all newer ASIC revisions.
13632          */
13633         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13634             !tg3_flag(tp, CPMU_PRESENT)) {
13635                 tg3_mac_loopback(tp, true);
13636
13637                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13638                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13639
13640                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13641                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13642                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13643
13644                 tg3_mac_loopback(tp, false);
13645         }
13646
13647         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13648             !tg3_flag(tp, USE_PHYLIB)) {
13649                 int i;
13650
13651                 tg3_phy_lpbk_set(tp, 0, false);
13652
13653                 /* Wait for link */
13654                 for (i = 0; i < 100; i++) {
13655                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13656                                 break;
13657                         mdelay(1);
13658                 }
13659
13660                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13661                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13662                 if (tg3_flag(tp, TSO_CAPABLE) &&
13663                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13664                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13665                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13666                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13667                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13668
13669                 if (do_extlpbk) {
13670                         tg3_phy_lpbk_set(tp, 0, true);
13671
13672                         /* All link indications report up, but the hardware
13673                          * isn't really ready for about 20 msec.  Double it
13674                          * to be sure.
13675                          */
13676                         mdelay(40);
13677
13678                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13679                                 data[TG3_EXT_LOOPB_TEST] |=
13680                                                         TG3_STD_LOOPBACK_FAILED;
13681                         if (tg3_flag(tp, TSO_CAPABLE) &&
13682                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13683                                 data[TG3_EXT_LOOPB_TEST] |=
13684                                                         TG3_TSO_LOOPBACK_FAILED;
13685                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13686                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13687                                 data[TG3_EXT_LOOPB_TEST] |=
13688                                                         TG3_JMB_LOOPBACK_FAILED;
13689                 }
13690
13691                 /* Re-enable gphy autopowerdown. */
13692                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13693                         tg3_phy_toggle_apd(tp, true);
13694         }
13695
13696         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13697                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13698
13699 done:
13700         tp->phy_flags |= eee_cap;
13701
13702         return err;
13703 }
13704
13705 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13706                           u64 *data)
13707 {
13708         struct tg3 *tp = netdev_priv(dev);
13709         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13710
13711         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13712                 if (tg3_power_up(tp)) {
13713                         etest->flags |= ETH_TEST_FL_FAILED;
13714                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13715                         return;
13716                 }
13717                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13718         }
13719
13720         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13721
13722         if (tg3_test_nvram(tp) != 0) {
13723                 etest->flags |= ETH_TEST_FL_FAILED;
13724                 data[TG3_NVRAM_TEST] = 1;
13725         }
13726         if (!doextlpbk && tg3_test_link(tp)) {
13727                 etest->flags |= ETH_TEST_FL_FAILED;
13728                 data[TG3_LINK_TEST] = 1;
13729         }
13730         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13731                 int err, err2 = 0, irq_sync = 0;
13732
13733                 if (netif_running(dev)) {
13734                         tg3_phy_stop(tp);
13735                         tg3_netif_stop(tp);
13736                         irq_sync = 1;
13737                 }
13738
13739                 tg3_full_lock(tp, irq_sync);
13740                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13741                 err = tg3_nvram_lock(tp);
13742                 tg3_halt_cpu(tp, RX_CPU_BASE);
13743                 if (!tg3_flag(tp, 5705_PLUS))
13744                         tg3_halt_cpu(tp, TX_CPU_BASE);
13745                 if (!err)
13746                         tg3_nvram_unlock(tp);
13747
13748                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13749                         tg3_phy_reset(tp);
13750
13751                 if (tg3_test_registers(tp) != 0) {
13752                         etest->flags |= ETH_TEST_FL_FAILED;
13753                         data[TG3_REGISTER_TEST] = 1;
13754                 }
13755
13756                 if (tg3_test_memory(tp) != 0) {
13757                         etest->flags |= ETH_TEST_FL_FAILED;
13758                         data[TG3_MEMORY_TEST] = 1;
13759                 }
13760
13761                 if (doextlpbk)
13762                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13763
13764                 if (tg3_test_loopback(tp, data, doextlpbk))
13765                         etest->flags |= ETH_TEST_FL_FAILED;
13766
13767                 tg3_full_unlock(tp);
13768
13769                 if (tg3_test_interrupt(tp) != 0) {
13770                         etest->flags |= ETH_TEST_FL_FAILED;
13771                         data[TG3_INTERRUPT_TEST] = 1;
13772                 }
13773
13774                 tg3_full_lock(tp, 0);
13775
13776                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13777                 if (netif_running(dev)) {
13778                         tg3_flag_set(tp, INIT_COMPLETE);
13779                         err2 = tg3_restart_hw(tp, true);
13780                         if (!err2)
13781                                 tg3_netif_start(tp);
13782                 }
13783
13784                 tg3_full_unlock(tp);
13785
13786                 if (irq_sync && !err2)
13787                         tg3_phy_start(tp);
13788         }
13789         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13790                 tg3_power_down_prepare(tp);
13792 }
13793
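/* Editor's note (illustrative): tg3_self_test() is reached through the
 * standard ethtool self-test API.  With a hypothetical interface eth0:
 *
 *   ethtool -t eth0 online       # non-disruptive subset (nvram, link)
 *   ethtool -t eth0 offline      # full run incl. register, memory,
 *                                # loopback and interrupt tests
 *   ethtool -t eth0 external_lb  # offline run incl. external loopback
 *
 * The offline path stops traffic, halts the device and restarts it
 * afterwards, so expect a brief link flap.
 */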
13794 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13795 {
13796         struct tg3 *tp = netdev_priv(dev);
13797         struct hwtstamp_config stmpconf;
13798
13799         if (!tg3_flag(tp, PTP_CAPABLE))
13800                 return -EOPNOTSUPP;
13801
13802         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13803                 return -EFAULT;
13804
13805         if (stmpconf.flags)
13806                 return -EINVAL;
13807
13808         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13809             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13810                 return -ERANGE;
13811
13812         switch (stmpconf.rx_filter) {
13813         case HWTSTAMP_FILTER_NONE:
13814                 tp->rxptpctl = 0;
13815                 break;
13816         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13817                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13818                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13819                 break;
13820         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13821                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13822                                TG3_RX_PTP_CTL_SYNC_EVNT;
13823                 break;
13824         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13825                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13826                                TG3_RX_PTP_CTL_DELAY_REQ;
13827                 break;
13828         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13829                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13830                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13831                 break;
13832         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13833                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13834                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13835                 break;
13836         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13837                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13838                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13839                 break;
13840         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13841                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13842                                TG3_RX_PTP_CTL_SYNC_EVNT;
13843                 break;
13844         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13845                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13846                                TG3_RX_PTP_CTL_SYNC_EVNT;
13847                 break;
13848         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13849                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13850                                TG3_RX_PTP_CTL_SYNC_EVNT;
13851                 break;
13852         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13853                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13854                                TG3_RX_PTP_CTL_DELAY_REQ;
13855                 break;
13856         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13857                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13858                                TG3_RX_PTP_CTL_DELAY_REQ;
13859                 break;
13860         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13861                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13862                                TG3_RX_PTP_CTL_DELAY_REQ;
13863                 break;
13864         default:
13865                 return -ERANGE;
13866         }
13867
13868         if (netif_running(dev) && tp->rxptpctl)
13869                 tw32(TG3_RX_PTP_CTL,
13870                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13871
13872         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13873                 tg3_flag_set(tp, TX_TSTAMP_EN);
13874         else
13875                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13876
13877         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13878                 -EFAULT : 0;
13879 }
13880
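/* Editor's note: a minimal userspace sketch (assumptions: interface name
 * eth0, an already-open AF_INET datagram socket fd, error handling
 * elided) of driving the SIOCSHWTSTAMP path handled above:
 *
 *      #include <linux/net_tstamp.h>
 *      #include <linux/sockios.h>
 *      #include <net/if.h>
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *
 *      struct hwtstamp_config cfg = {
 *              .tx_type   = HWTSTAMP_TX_ON,
 *              .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *      };
 *      struct ifreq ifr;
 *
 *      memset(&ifr, 0, sizeof(ifr));
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ifr.ifr_data = (void *)&cfg;
 *      ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The driver copies the (possibly adjusted) config back, and
 * SIOCGHWTSTAMP below reports what is currently enabled.
 */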
13881 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13882 {
13883         struct tg3 *tp = netdev_priv(dev);
13884         struct hwtstamp_config stmpconf;
13885
13886         if (!tg3_flag(tp, PTP_CAPABLE))
13887                 return -EOPNOTSUPP;
13888
13889         stmpconf.flags = 0;
13890         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13891                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13892
13893         switch (tp->rxptpctl) {
13894         case 0:
13895                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13896                 break;
13897         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13898                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13899                 break;
13900         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13901                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13902                 break;
13903         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13904                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13905                 break;
13906         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13907                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13908                 break;
13909         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13910                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13911                 break;
13912         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13913                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13914                 break;
13915         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13916                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13917                 break;
13918         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13919                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13920                 break;
13921         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13922                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13923                 break;
13924         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13925                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13926                 break;
13927         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13928                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13929                 break;
13930         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13931                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13932                 break;
13933         default:
13934                 WARN_ON_ONCE(1);
13935                 return -ERANGE;
13936         }
13937
13938         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13939                 -EFAULT : 0;
13940 }
13941
13942 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13943 {
13944         struct mii_ioctl_data *data = if_mii(ifr);
13945         struct tg3 *tp = netdev_priv(dev);
13946         int err;
13947
13948         if (tg3_flag(tp, USE_PHYLIB)) {
13949                 struct phy_device *phydev;
13950                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13951                         return -EAGAIN;
13952                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13953                 return phy_mii_ioctl(phydev, ifr, cmd);
13954         }
13955
13956         switch (cmd) {
13957         case SIOCGMIIPHY:
13958                 data->phy_id = tp->phy_addr;
13959
13960                 /* fallthru */
13961         case SIOCGMIIREG: {
13962                 u32 mii_regval;
13963
13964                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13965                         break;                  /* We have no PHY */
13966
13967                 if (!netif_running(dev))
13968                         return -EAGAIN;
13969
13970                 spin_lock_bh(&tp->lock);
13971                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13972                                     data->reg_num & 0x1f, &mii_regval);
13973                 spin_unlock_bh(&tp->lock);
13974
13975                 data->val_out = mii_regval;
13976
13977                 return err;
13978         }
13979
13980         case SIOCSMIIREG:
13981                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13982                         break;                  /* We have no PHY */
13983
13984                 if (!netif_running(dev))
13985                         return -EAGAIN;
13986
13987                 spin_lock_bh(&tp->lock);
13988                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13989                                      data->reg_num & 0x1f, data->val_in);
13990                 spin_unlock_bh(&tp->lock);
13991
13992                 return err;
13993
13994         case SIOCSHWTSTAMP:
13995                 return tg3_hwtstamp_set(dev, ifr);
13996
13997         case SIOCGHWTSTAMP:
13998                 return tg3_hwtstamp_get(dev, ifr);
13999
14000         default:
14001                 /* do nothing */
14002                 break;
14003         }
14004         return -EOPNOTSUPP;
14005 }
14006
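/* Editor's note: an illustrative userspace sketch (hypothetical interface
 * eth0, error handling elided) of the SIOCGMIIPHY / SIOCGMIIREG path
 * above, reading PHY register 1 (MII_BMSR).  The mii data lives inline
 * in the ifreq union, hence the cast:
 *
 *      #include <linux/mii.h>
 *      #include <linux/sockios.h>
 *      #include <net/if.h>
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *
 *      struct ifreq ifr;
 *      struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *      memset(&ifr, 0, sizeof(ifr));
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ioctl(fd, SIOCGMIIPHY, &ifr);   // fills mii->phy_id
 *      mii->reg_num = MII_BMSR;
 *      ioctl(fd, SIOCGMIIREG, &ifr);   // result in mii->val_out
 */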
14007 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14008 {
14009         struct tg3 *tp = netdev_priv(dev);
14010
14011         memcpy(ec, &tp->coal, sizeof(*ec));
14012         return 0;
14013 }
14014
14015 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14016 {
14017         struct tg3 *tp = netdev_priv(dev);
14018         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14019         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14020
14021         if (!tg3_flag(tp, 5705_PLUS)) {
14022                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14023                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14024                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14025                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14026         }
14027
14028         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14029             (!ec->rx_coalesce_usecs) ||
14030             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14031             (!ec->tx_coalesce_usecs) ||
14032             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14033             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14034             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14035             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14036             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14037             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14038             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14039             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14040                 return -EINVAL;
14041
14042         /* Only copy relevant parameters, ignore all others. */
14043         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14044         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14045         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14046         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14047         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14048         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14049         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14050         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14051         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14052
14053         if (netif_running(dev)) {
14054                 tg3_full_lock(tp, 0);
14055                 __tg3_set_coalesce(tp, &tp->coal);
14056                 tg3_full_unlock(tp);
14057         }
14058         return 0;
14059 }
14060
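/* Editor's note (illustrative): the coalescing limits enforced above map
 * onto the standard ethtool -C knobs.  With a hypothetical eth0:
 *
 *   ethtool -c eth0                            # show current settings
 *   ethtool -C eth0 rx-usecs 30 rx-frames 15   # tune rx coalescing
 *
 * Note that rx-usecs/tx-usecs of 0 are rejected here (-EINVAL), and on
 * 5705-and-newer parts the *-usecs-irq and stats-block-usecs limits stay
 * at 0, so those parameters cannot be raised on such chips.
 */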
14061 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14062 {
14063         struct tg3 *tp = netdev_priv(dev);
14064
14065         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14066                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14067                 return -EOPNOTSUPP;
14068         }
14069
14070         if (edata->advertised != tp->eee.advertised) {
14071                 netdev_warn(tp->dev,
14072                             "Direct manipulation of EEE advertisement is not supported\n");
14073                 return -EINVAL;
14074         }
14075
14076         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14077                 netdev_warn(tp->dev,
14078                             "Maximal Tx LPI timer supported is %#x\n",
14079                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14080                 return -EINVAL;
14081         }
14082
14083         tp->eee = *edata;
14084
14085         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14086         tg3_warn_mgmt_link_flap(tp);
14087
14088         if (netif_running(tp->dev)) {
14089                 tg3_full_lock(tp, 0);
14090                 tg3_setup_eee(tp);
14091                 tg3_phy_reset(tp);
14092                 tg3_full_unlock(tp);
14093         }
14094
14095         return 0;
14096 }
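
/* Usage sketch (hypothetical device name): this handler backs the
 * "ethtool --set-eee" command, e.g.
 *
 *   ethtool --set-eee eth0 tx-timer 1000
 *
 * Direct manipulation of the advertised modes is rejected above, and a
 * tx-lpi timer beyond TG3_CPMU_DBTMR1_LNKIDLE_MAX returns -EINVAL.
 */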
14097
14098 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14099 {
14100         struct tg3 *tp = netdev_priv(dev);
14101
14102         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14103                 netdev_warn(tp->dev,
14104                             "Board does not support EEE!\n");
14105                 return -EOPNOTSUPP;
14106         }
14107
14108         *edata = tp->eee;
14109         return 0;
14110 }
14111
14112 static const struct ethtool_ops tg3_ethtool_ops = {
14113         .get_drvinfo            = tg3_get_drvinfo,
14114         .get_regs_len           = tg3_get_regs_len,
14115         .get_regs               = tg3_get_regs,
14116         .get_wol                = tg3_get_wol,
14117         .set_wol                = tg3_set_wol,
14118         .get_msglevel           = tg3_get_msglevel,
14119         .set_msglevel           = tg3_set_msglevel,
14120         .nway_reset             = tg3_nway_reset,
14121         .get_link               = ethtool_op_get_link,
14122         .get_eeprom_len         = tg3_get_eeprom_len,
14123         .get_eeprom             = tg3_get_eeprom,
14124         .set_eeprom             = tg3_set_eeprom,
14125         .get_ringparam          = tg3_get_ringparam,
14126         .set_ringparam          = tg3_set_ringparam,
14127         .get_pauseparam         = tg3_get_pauseparam,
14128         .set_pauseparam         = tg3_set_pauseparam,
14129         .self_test              = tg3_self_test,
14130         .get_strings            = tg3_get_strings,
14131         .set_phys_id            = tg3_set_phys_id,
14132         .get_ethtool_stats      = tg3_get_ethtool_stats,
14133         .get_coalesce           = tg3_get_coalesce,
14134         .set_coalesce           = tg3_set_coalesce,
14135         .get_sset_count         = tg3_get_sset_count,
14136         .get_rxnfc              = tg3_get_rxnfc,
14137         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14138         .get_rxfh               = tg3_get_rxfh,
14139         .set_rxfh               = tg3_set_rxfh,
14140         .get_channels           = tg3_get_channels,
14141         .set_channels           = tg3_set_channels,
14142         .get_ts_info            = tg3_get_ts_info,
14143         .get_eee                = tg3_get_eee,
14144         .set_eee                = tg3_set_eee,
14145         .get_link_ksettings     = tg3_get_link_ksettings,
14146         .set_link_ksettings     = tg3_set_link_ksettings,
14147 };
14148
14149 static void tg3_get_stats64(struct net_device *dev,
14150                             struct rtnl_link_stats64 *stats)
14151 {
14152         struct tg3 *tp = netdev_priv(dev);
14153
14154         spin_lock_bh(&tp->lock);
14155         if (!tp->hw_stats) {
14156                 *stats = tp->net_stats_prev;
14157                 spin_unlock_bh(&tp->lock);
14158                 return;
14159         }
14160
14161         tg3_get_nstats(tp, stats);
14162         spin_unlock_bh(&tp->lock);
14163 }
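
/* Note: tp->hw_stats is only valid while the device is up; when it is
 * NULL, the snapshot saved in tp->net_stats_prev (presumably taken when
 * the device was last brought down) is reported instead.
 */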
14164
14165 static void tg3_set_rx_mode(struct net_device *dev)
14166 {
14167         struct tg3 *tp = netdev_priv(dev);
14168
14169         if (!netif_running(dev))
14170                 return;
14171
14172         tg3_full_lock(tp, 0);
14173         __tg3_set_rx_mode(dev);
14174         tg3_full_unlock(tp);
14175 }
14176
14177 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14178                                int new_mtu)
14179 {
14180         dev->mtu = new_mtu;
14181
14182         if (new_mtu > ETH_DATA_LEN) {
14183                 if (tg3_flag(tp, 5780_CLASS)) {
14184                         netdev_update_features(dev);
14185                         tg3_flag_clear(tp, TSO_CAPABLE);
14186                 } else {
14187                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14188                 }
14189         } else {
14190                 if (tg3_flag(tp, 5780_CLASS)) {
14191                         tg3_flag_set(tp, TSO_CAPABLE);
14192                         netdev_update_features(dev);
14193                 }
14194                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14195         }
14196 }
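
/* Example: requesting an MTU of 9000 (> ETH_DATA_LEN, i.e. 1500) turns
 * on the jumbo ring for most chips; on 5780-class parts, presumably
 * because TSO cannot be combined with jumbo frames there, TSO_CAPABLE
 * is cleared instead and netdev_update_features() re-evaluates the
 * feature set.
 */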
14197
14198 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14199 {
14200         struct tg3 *tp = netdev_priv(dev);
14201         int err;
14202         bool reset_phy = false;
14203
14204         if (!netif_running(dev)) {
14205                 /* We'll just catch it later when the
14206                  * device is brought up.
14207                  */
14208                 tg3_set_mtu(dev, tp, new_mtu);
14209                 return 0;
14210         }
14211
14212         tg3_phy_stop(tp);
14213
14214         tg3_netif_stop(tp);
14215
14216         tg3_set_mtu(dev, tp, new_mtu);
14217
14218         tg3_full_lock(tp, 1);
14219
14220         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14221
14222         /* Reset the PHY, otherwise the read DMA engine will be left in a
14223          * mode that breaks all DMA requests into 256-byte chunks.
14224          */
14225         if (tg3_asic_rev(tp) == ASIC_REV_57766)
14226                 reset_phy = true;
14227
14228         err = tg3_restart_hw(tp, reset_phy);
14229
14230         if (!err)
14231                 tg3_netif_start(tp);
14232
14233         tg3_full_unlock(tp);
14234
14235         if (!err)
14236                 tg3_phy_start(tp);
14237
14238         return err;
14239 }
14240
14241 static const struct net_device_ops tg3_netdev_ops = {
14242         .ndo_open               = tg3_open,
14243         .ndo_stop               = tg3_close,
14244         .ndo_start_xmit         = tg3_start_xmit,
14245         .ndo_get_stats64        = tg3_get_stats64,
14246         .ndo_validate_addr      = eth_validate_addr,
14247         .ndo_set_rx_mode        = tg3_set_rx_mode,
14248         .ndo_set_mac_address    = tg3_set_mac_addr,
14249         .ndo_do_ioctl           = tg3_ioctl,
14250         .ndo_tx_timeout         = tg3_tx_timeout,
14251         .ndo_change_mtu         = tg3_change_mtu,
14252         .ndo_fix_features       = tg3_fix_features,
14253         .ndo_set_features       = tg3_set_features,
14254 #ifdef CONFIG_NET_POLL_CONTROLLER
14255         .ndo_poll_controller    = tg3_poll_controller,
14256 #endif
14257 };
14258
14259 static void tg3_get_eeprom_size(struct tg3 *tp)
14260 {
14261         u32 cursize, val, magic;
14262
14263         tp->nvram_size = EEPROM_CHIP_SIZE;
14264
14265         if (tg3_nvram_read(tp, 0, &magic) != 0)
14266                 return;
14267
14268         if ((magic != TG3_EEPROM_MAGIC) &&
14269             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14270             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14271                 return;
14272
14273         /*
14274          * Size the chip by reading offsets at increasing powers of two.
14275          * When we encounter our validation signature, we know the addressing
14276          * has wrapped around, and thus have our chip size.
14277          */
14278         cursize = 0x10;
14279
14280         while (cursize < tp->nvram_size) {
14281                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14282                         return;
14283
14284                 if (val == magic)
14285                         break;
14286
14287                 cursize <<= 1;
14288         }
14289
14290         tp->nvram_size = cursize;
14291 }
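
/* Worked example for the sizing loop above (hypothetical 1KB part):
 * reads at 0x10, 0x20, 0x40, ... return ordinary data until the probe
 * offset reaches the device size; a 1KB EEPROM aliases offset 0x400
 * back to offset 0, the read returns the magic signature, and cursize
 * (0x400) becomes tp->nvram_size.
 */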
14292
14293 static void tg3_get_nvram_size(struct tg3 *tp)
14294 {
14295         u32 val;
14296
14297         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14298                 return;
14299
14300         /* Selfboot format */
14301         if (val != TG3_EEPROM_MAGIC) {
14302                 tg3_get_eeprom_size(tp);
14303                 return;
14304         }
14305
14306         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14307                 if (val != 0) {
14308                         /* We want to operate on the 16-bit value at
14309                          * offset 0xf2.  The tg3_nvram_read() call reads
14310                          * from NVRAM and byteswaps the data according to
14311                          * the byteswapping settings used for all other
14312                          * register accesses, which ensures the value we
14313                          * want always lands in the lower 16 bits.
14314                          * However, NVRAM data is stored in LE format, so
14315                          * the value returned by the read is always
14316                          * opposite the endianness of the CPU.  The 16-bit
14317                          * byteswap then brings the data to CPU endianness.
14318                          */
14319                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14320                         return;
14321                 }
14322         }
14323         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14324 }
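
/* Example of the conversion above: if the 16-bit field at offset 0xf2
 * decodes to 0x0200 after swab16(), the part advertises 0x200 (512) KB
 * and tp->nvram_size becomes 512 * 1024 bytes.
 */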
14325
14326 static void tg3_get_nvram_info(struct tg3 *tp)
14327 {
14328         u32 nvcfg1;
14329
14330         nvcfg1 = tr32(NVRAM_CFG1);
14331         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14332                 tg3_flag_set(tp, FLASH);
14333         } else {
14334                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14335                 tw32(NVRAM_CFG1, nvcfg1);
14336         }
14337
14338         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14339             tg3_flag(tp, 5780_CLASS)) {
14340                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14341                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14342                         tp->nvram_jedecnum = JEDEC_ATMEL;
14343                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14344                         tg3_flag_set(tp, NVRAM_BUFFERED);
14345                         break;
14346                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14347                         tp->nvram_jedecnum = JEDEC_ATMEL;
14348                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14349                         break;
14350                 case FLASH_VENDOR_ATMEL_EEPROM:
14351                         tp->nvram_jedecnum = JEDEC_ATMEL;
14352                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14353                         tg3_flag_set(tp, NVRAM_BUFFERED);
14354                         break;
14355                 case FLASH_VENDOR_ST:
14356                         tp->nvram_jedecnum = JEDEC_ST;
14357                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14358                         tg3_flag_set(tp, NVRAM_BUFFERED);
14359                         break;
14360                 case FLASH_VENDOR_SAIFUN:
14361                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14362                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14363                         break;
14364                 case FLASH_VENDOR_SST_SMALL:
14365                 case FLASH_VENDOR_SST_LARGE:
14366                         tp->nvram_jedecnum = JEDEC_SST;
14367                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14368                         break;
14369                 }
14370         } else {
14371                 tp->nvram_jedecnum = JEDEC_ATMEL;
14372                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14373                 tg3_flag_set(tp, NVRAM_BUFFERED);
14374         }
14375 }
14376
14377 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14378 {
14379         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14380         case FLASH_5752PAGE_SIZE_256:
14381                 tp->nvram_pagesize = 256;
14382                 break;
14383         case FLASH_5752PAGE_SIZE_512:
14384                 tp->nvram_pagesize = 512;
14385                 break;
14386         case FLASH_5752PAGE_SIZE_1K:
14387                 tp->nvram_pagesize = 1024;
14388                 break;
14389         case FLASH_5752PAGE_SIZE_2K:
14390                 tp->nvram_pagesize = 2048;
14391                 break;
14392         case FLASH_5752PAGE_SIZE_4K:
14393                 tp->nvram_pagesize = 4096;
14394                 break;
14395         case FLASH_5752PAGE_SIZE_264:
14396                 tp->nvram_pagesize = 264;
14397                 break;
14398         case FLASH_5752PAGE_SIZE_528:
14399                 tp->nvram_pagesize = 528;
14400                 break;
14401         }
14402 }
14403
14404 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14405 {
14406         u32 nvcfg1;
14407
14408         nvcfg1 = tr32(NVRAM_CFG1);
14409
14410         /* NVRAM protection for TPM */
14411         if (nvcfg1 & (1 << 27))
14412                 tg3_flag_set(tp, PROTECTED_NVRAM);
14413
14414         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14415         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14416         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14417                 tp->nvram_jedecnum = JEDEC_ATMEL;
14418                 tg3_flag_set(tp, NVRAM_BUFFERED);
14419                 break;
14420         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14421                 tp->nvram_jedecnum = JEDEC_ATMEL;
14422                 tg3_flag_set(tp, NVRAM_BUFFERED);
14423                 tg3_flag_set(tp, FLASH);
14424                 break;
14425         case FLASH_5752VENDOR_ST_M45PE10:
14426         case FLASH_5752VENDOR_ST_M45PE20:
14427         case FLASH_5752VENDOR_ST_M45PE40:
14428                 tp->nvram_jedecnum = JEDEC_ST;
14429                 tg3_flag_set(tp, NVRAM_BUFFERED);
14430                 tg3_flag_set(tp, FLASH);
14431                 break;
14432         }
14433
14434         if (tg3_flag(tp, FLASH)) {
14435                 tg3_nvram_get_pagesize(tp, nvcfg1);
14436         } else {
14437                 /* For EEPROM, set pagesize to the maximum EEPROM size. */
14438                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14439
14440                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14441                 tw32(NVRAM_CFG1, nvcfg1);
14442         }
14443 }
14444
14445 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14446 {
14447         u32 nvcfg1, protect = 0;
14448
14449         nvcfg1 = tr32(NVRAM_CFG1);
14450
14451         /* NVRAM protection for TPM */
14452         if (nvcfg1 & (1 << 27)) {
14453                 tg3_flag_set(tp, PROTECTED_NVRAM);
14454                 protect = 1;
14455         }
14456
14457         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14458         switch (nvcfg1) {
14459         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14460         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14461         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14462         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14463                 tp->nvram_jedecnum = JEDEC_ATMEL;
14464                 tg3_flag_set(tp, NVRAM_BUFFERED);
14465                 tg3_flag_set(tp, FLASH);
14466                 tp->nvram_pagesize = 264;
14467                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14468                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14469                         tp->nvram_size = (protect ? 0x3e200 :
14470                                           TG3_NVRAM_SIZE_512KB);
14471                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14472                         tp->nvram_size = (protect ? 0x1f200 :
14473                                           TG3_NVRAM_SIZE_256KB);
14474                 else
14475                         tp->nvram_size = (protect ? 0x1f200 :
14476                                           TG3_NVRAM_SIZE_128KB);
14477                 break;
14478         case FLASH_5752VENDOR_ST_M45PE10:
14479         case FLASH_5752VENDOR_ST_M45PE20:
14480         case FLASH_5752VENDOR_ST_M45PE40:
14481                 tp->nvram_jedecnum = JEDEC_ST;
14482                 tg3_flag_set(tp, NVRAM_BUFFERED);
14483                 tg3_flag_set(tp, FLASH);
14484                 tp->nvram_pagesize = 256;
14485                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14486                         tp->nvram_size = (protect ?
14487                                           TG3_NVRAM_SIZE_64KB :
14488                                           TG3_NVRAM_SIZE_128KB);
14489                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14490                         tp->nvram_size = (protect ?
14491                                           TG3_NVRAM_SIZE_64KB :
14492                                           TG3_NVRAM_SIZE_256KB);
14493                 else
14494                         tp->nvram_size = (protect ?
14495                                           TG3_NVRAM_SIZE_128KB :
14496                                           TG3_NVRAM_SIZE_512KB);
14497                 break;
14498         }
14499 }
14500
14501 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14502 {
14503         u32 nvcfg1;
14504
14505         nvcfg1 = tr32(NVRAM_CFG1);
14506
14507         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14508         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14509         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14510         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14511         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14512                 tp->nvram_jedecnum = JEDEC_ATMEL;
14513                 tg3_flag_set(tp, NVRAM_BUFFERED);
14514                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14515
14516                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14517                 tw32(NVRAM_CFG1, nvcfg1);
14518                 break;
14519         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14520         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14521         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14522         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14523                 tp->nvram_jedecnum = JEDEC_ATMEL;
14524                 tg3_flag_set(tp, NVRAM_BUFFERED);
14525                 tg3_flag_set(tp, FLASH);
14526                 tp->nvram_pagesize = 264;
14527                 break;
14528         case FLASH_5752VENDOR_ST_M45PE10:
14529         case FLASH_5752VENDOR_ST_M45PE20:
14530         case FLASH_5752VENDOR_ST_M45PE40:
14531                 tp->nvram_jedecnum = JEDEC_ST;
14532                 tg3_flag_set(tp, NVRAM_BUFFERED);
14533                 tg3_flag_set(tp, FLASH);
14534                 tp->nvram_pagesize = 256;
14535                 break;
14536         }
14537 }
14538
14539 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14540 {
14541         u32 nvcfg1, protect = 0;
14542
14543         nvcfg1 = tr32(NVRAM_CFG1);
14544
14545         /* NVRAM protection for TPM */
14546         if (nvcfg1 & (1 << 27)) {
14547                 tg3_flag_set(tp, PROTECTED_NVRAM);
14548                 protect = 1;
14549         }
14550
14551         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14552         switch (nvcfg1) {
14553         case FLASH_5761VENDOR_ATMEL_ADB021D:
14554         case FLASH_5761VENDOR_ATMEL_ADB041D:
14555         case FLASH_5761VENDOR_ATMEL_ADB081D:
14556         case FLASH_5761VENDOR_ATMEL_ADB161D:
14557         case FLASH_5761VENDOR_ATMEL_MDB021D:
14558         case FLASH_5761VENDOR_ATMEL_MDB041D:
14559         case FLASH_5761VENDOR_ATMEL_MDB081D:
14560         case FLASH_5761VENDOR_ATMEL_MDB161D:
14561                 tp->nvram_jedecnum = JEDEC_ATMEL;
14562                 tg3_flag_set(tp, NVRAM_BUFFERED);
14563                 tg3_flag_set(tp, FLASH);
14564                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14565                 tp->nvram_pagesize = 256;
14566                 break;
14567         case FLASH_5761VENDOR_ST_A_M45PE20:
14568         case FLASH_5761VENDOR_ST_A_M45PE40:
14569         case FLASH_5761VENDOR_ST_A_M45PE80:
14570         case FLASH_5761VENDOR_ST_A_M45PE16:
14571         case FLASH_5761VENDOR_ST_M_M45PE20:
14572         case FLASH_5761VENDOR_ST_M_M45PE40:
14573         case FLASH_5761VENDOR_ST_M_M45PE80:
14574         case FLASH_5761VENDOR_ST_M_M45PE16:
14575                 tp->nvram_jedecnum = JEDEC_ST;
14576                 tg3_flag_set(tp, NVRAM_BUFFERED);
14577                 tg3_flag_set(tp, FLASH);
14578                 tp->nvram_pagesize = 256;
14579                 break;
14580         }
14581
14582         if (protect) {
14583                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14584         } else {
14585                 switch (nvcfg1) {
14586                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14587                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14588                 case FLASH_5761VENDOR_ST_A_M45PE16:
14589                 case FLASH_5761VENDOR_ST_M_M45PE16:
14590                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14591                         break;
14592                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14593                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14594                 case FLASH_5761VENDOR_ST_A_M45PE80:
14595                 case FLASH_5761VENDOR_ST_M_M45PE80:
14596                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14597                         break;
14598                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14599                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14600                 case FLASH_5761VENDOR_ST_A_M45PE40:
14601                 case FLASH_5761VENDOR_ST_M_M45PE40:
14602                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14603                         break;
14604                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14605                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14606                 case FLASH_5761VENDOR_ST_A_M45PE20:
14607                 case FLASH_5761VENDOR_ST_M_M45PE20:
14608                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14609                         break;
14610                 }
14611         }
14612 }
14613
14614 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14615 {
14616         tp->nvram_jedecnum = JEDEC_ATMEL;
14617         tg3_flag_set(tp, NVRAM_BUFFERED);
14618         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14619 }
14620
14621 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14622 {
14623         u32 nvcfg1;
14624
14625         nvcfg1 = tr32(NVRAM_CFG1);
14626
14627         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14628         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14629         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14630                 tp->nvram_jedecnum = JEDEC_ATMEL;
14631                 tg3_flag_set(tp, NVRAM_BUFFERED);
14632                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14633
14634                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14635                 tw32(NVRAM_CFG1, nvcfg1);
14636                 return;
14637         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14638         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14639         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14640         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14641         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14642         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14643         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14644                 tp->nvram_jedecnum = JEDEC_ATMEL;
14645                 tg3_flag_set(tp, NVRAM_BUFFERED);
14646                 tg3_flag_set(tp, FLASH);
14647
14648                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14649                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14650                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14651                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14652                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14653                         break;
14654                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14655                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14656                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14657                         break;
14658                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14659                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14660                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14661                         break;
14662                 }
14663                 break;
14664         case FLASH_5752VENDOR_ST_M45PE10:
14665         case FLASH_5752VENDOR_ST_M45PE20:
14666         case FLASH_5752VENDOR_ST_M45PE40:
14667                 tp->nvram_jedecnum = JEDEC_ST;
14668                 tg3_flag_set(tp, NVRAM_BUFFERED);
14669                 tg3_flag_set(tp, FLASH);
14670
14671                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14672                 case FLASH_5752VENDOR_ST_M45PE10:
14673                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14674                         break;
14675                 case FLASH_5752VENDOR_ST_M45PE20:
14676                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14677                         break;
14678                 case FLASH_5752VENDOR_ST_M45PE40:
14679                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14680                         break;
14681                 }
14682                 break;
14683         default:
14684                 tg3_flag_set(tp, NO_NVRAM);
14685                 return;
14686         }
14687
14688         tg3_nvram_get_pagesize(tp, nvcfg1);
14689         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14690                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14691 }
14692
14694 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14695 {
14696         u32 nvcfg1;
14697
14698         nvcfg1 = tr32(NVRAM_CFG1);
14699
14700         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14701         case FLASH_5717VENDOR_ATMEL_EEPROM:
14702         case FLASH_5717VENDOR_MICRO_EEPROM:
14703                 tp->nvram_jedecnum = JEDEC_ATMEL;
14704                 tg3_flag_set(tp, NVRAM_BUFFERED);
14705                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14706
14707                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14708                 tw32(NVRAM_CFG1, nvcfg1);
14709                 return;
14710         case FLASH_5717VENDOR_ATMEL_MDB011D:
14711         case FLASH_5717VENDOR_ATMEL_ADB011B:
14712         case FLASH_5717VENDOR_ATMEL_ADB011D:
14713         case FLASH_5717VENDOR_ATMEL_MDB021D:
14714         case FLASH_5717VENDOR_ATMEL_ADB021B:
14715         case FLASH_5717VENDOR_ATMEL_ADB021D:
14716         case FLASH_5717VENDOR_ATMEL_45USPT:
14717                 tp->nvram_jedecnum = JEDEC_ATMEL;
14718                 tg3_flag_set(tp, NVRAM_BUFFERED);
14719                 tg3_flag_set(tp, FLASH);
14720
14721                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14722                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14723                         /* Detect size with tg3_get_nvram_size() */
14724                         break;
14725                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14726                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14727                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14728                         break;
14729                 default:
14730                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14731                         break;
14732                 }
14733                 break;
14734         case FLASH_5717VENDOR_ST_M_M25PE10:
14735         case FLASH_5717VENDOR_ST_A_M25PE10:
14736         case FLASH_5717VENDOR_ST_M_M45PE10:
14737         case FLASH_5717VENDOR_ST_A_M45PE10:
14738         case FLASH_5717VENDOR_ST_M_M25PE20:
14739         case FLASH_5717VENDOR_ST_A_M25PE20:
14740         case FLASH_5717VENDOR_ST_M_M45PE20:
14741         case FLASH_5717VENDOR_ST_A_M45PE20:
14742         case FLASH_5717VENDOR_ST_25USPT:
14743         case FLASH_5717VENDOR_ST_45USPT:
14744                 tp->nvram_jedecnum = JEDEC_ST;
14745                 tg3_flag_set(tp, NVRAM_BUFFERED);
14746                 tg3_flag_set(tp, FLASH);
14747
14748                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14749                 case FLASH_5717VENDOR_ST_M_M25PE20:
14750                 case FLASH_5717VENDOR_ST_M_M45PE20:
14751                         /* Detect size with tg3_get_nvram_size() */
14752                         break;
14753                 case FLASH_5717VENDOR_ST_A_M25PE20:
14754                 case FLASH_5717VENDOR_ST_A_M45PE20:
14755                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14756                         break;
14757                 default:
14758                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14759                         break;
14760                 }
14761                 break;
14762         default:
14763                 tg3_flag_set(tp, NO_NVRAM);
14764                 return;
14765         }
14766
14767         tg3_nvram_get_pagesize(tp, nvcfg1);
14768         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14769                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14770 }
14771
14772 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14773 {
14774         u32 nvcfg1, nvmpinstrp;
14775
14776         nvcfg1 = tr32(NVRAM_CFG1);
14777         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14778
14779         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14780                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14781                         tg3_flag_set(tp, NO_NVRAM);
14782                         return;
14783                 }
14784
14785                 switch (nvmpinstrp) {
14786                 case FLASH_5762_EEPROM_HD:
14787                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14788                         break;
14789                 case FLASH_5762_EEPROM_LD:
14790                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14791                         break;
14792                 case FLASH_5720VENDOR_M_ST_M45PE20:
14793                         /* This pinstrap supports multiple sizes, so force it
14794                          * to read the actual size from location 0xf0.
14795                          */
14796                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14797                         break;
14798                 }
14799         }
14800
14801         switch (nvmpinstrp) {
14802         case FLASH_5720_EEPROM_HD:
14803         case FLASH_5720_EEPROM_LD:
14804                 tp->nvram_jedecnum = JEDEC_ATMEL;
14805                 tg3_flag_set(tp, NVRAM_BUFFERED);
14806
14807                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14808                 tw32(NVRAM_CFG1, nvcfg1);
14809                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14810                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14811                 else
14812                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14813                 return;
14814         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14815         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14816         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14817         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14818         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14819         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14820         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14821         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14822         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14823         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14824         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14825         case FLASH_5720VENDOR_ATMEL_45USPT:
14826                 tp->nvram_jedecnum = JEDEC_ATMEL;
14827                 tg3_flag_set(tp, NVRAM_BUFFERED);
14828                 tg3_flag_set(tp, FLASH);
14829
14830                 switch (nvmpinstrp) {
14831                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14832                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14833                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14834                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14835                         break;
14836                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14837                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14838                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14839                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14840                         break;
14841                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14842                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14843                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14844                         break;
14845                 default:
14846                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14847                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14848                         break;
14849                 }
14850                 break;
14851         case FLASH_5720VENDOR_M_ST_M25PE10:
14852         case FLASH_5720VENDOR_M_ST_M45PE10:
14853         case FLASH_5720VENDOR_A_ST_M25PE10:
14854         case FLASH_5720VENDOR_A_ST_M45PE10:
14855         case FLASH_5720VENDOR_M_ST_M25PE20:
14856         case FLASH_5720VENDOR_M_ST_M45PE20:
14857         case FLASH_5720VENDOR_A_ST_M25PE20:
14858         case FLASH_5720VENDOR_A_ST_M45PE20:
14859         case FLASH_5720VENDOR_M_ST_M25PE40:
14860         case FLASH_5720VENDOR_M_ST_M45PE40:
14861         case FLASH_5720VENDOR_A_ST_M25PE40:
14862         case FLASH_5720VENDOR_A_ST_M45PE40:
14863         case FLASH_5720VENDOR_M_ST_M25PE80:
14864         case FLASH_5720VENDOR_M_ST_M45PE80:
14865         case FLASH_5720VENDOR_A_ST_M25PE80:
14866         case FLASH_5720VENDOR_A_ST_M45PE80:
14867         case FLASH_5720VENDOR_ST_25USPT:
14868         case FLASH_5720VENDOR_ST_45USPT:
14869                 tp->nvram_jedecnum = JEDEC_ST;
14870                 tg3_flag_set(tp, NVRAM_BUFFERED);
14871                 tg3_flag_set(tp, FLASH);
14872
14873                 switch (nvmpinstrp) {
14874                 case FLASH_5720VENDOR_M_ST_M25PE20:
14875                 case FLASH_5720VENDOR_M_ST_M45PE20:
14876                 case FLASH_5720VENDOR_A_ST_M25PE20:
14877                 case FLASH_5720VENDOR_A_ST_M45PE20:
14878                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14879                         break;
14880                 case FLASH_5720VENDOR_M_ST_M25PE40:
14881                 case FLASH_5720VENDOR_M_ST_M45PE40:
14882                 case FLASH_5720VENDOR_A_ST_M25PE40:
14883                 case FLASH_5720VENDOR_A_ST_M45PE40:
14884                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14885                         break;
14886                 case FLASH_5720VENDOR_M_ST_M25PE80:
14887                 case FLASH_5720VENDOR_M_ST_M45PE80:
14888                 case FLASH_5720VENDOR_A_ST_M25PE80:
14889                 case FLASH_5720VENDOR_A_ST_M45PE80:
14890                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14891                         break;
14892                 default:
14893                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14894                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14895                         break;
14896                 }
14897                 break;
14898         default:
14899                 tg3_flag_set(tp, NO_NVRAM);
14900                 return;
14901         }
14902
14903         tg3_nvram_get_pagesize(tp, nvcfg1);
14904         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14905                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14906
14907         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14908                 u32 val;
14909
14910                 if (tg3_nvram_read(tp, 0, &val))
14911                         return;
14912
14913                 if (val != TG3_EEPROM_MAGIC &&
14914                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14915                         tg3_flag_set(tp, NO_NVRAM);
14916         }
14917 }
14918
14919 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14920 static void tg3_nvram_init(struct tg3 *tp)
14921 {
14922         if (tg3_flag(tp, IS_SSB_CORE)) {
14923                 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14924                 tg3_flag_clear(tp, NVRAM);
14925                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14926                 tg3_flag_set(tp, NO_NVRAM);
14927                 return;
14928         }
14929
14930         tw32_f(GRC_EEPROM_ADDR,
14931              (EEPROM_ADDR_FSM_RESET |
14932               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14933                EEPROM_ADDR_CLKPERD_SHIFT)));
14934
14935         msleep(1);
14936
14937         /* Enable serial EEPROM accesses. */
14938         tw32_f(GRC_LOCAL_CTRL,
14939              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14940         udelay(100);
14941
14942         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14943             tg3_asic_rev(tp) != ASIC_REV_5701) {
14944                 tg3_flag_set(tp, NVRAM);
14945
14946                 if (tg3_nvram_lock(tp)) {
14947                         netdev_warn(tp->dev,
14948                                     "Cannot get nvram lock, %s failed\n",
14949                                     __func__);
14950                         return;
14951                 }
14952                 tg3_enable_nvram_access(tp);
14953
14954                 tp->nvram_size = 0;
14955
14956                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14957                         tg3_get_5752_nvram_info(tp);
14958                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14959                         tg3_get_5755_nvram_info(tp);
14960                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14961                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14962                          tg3_asic_rev(tp) == ASIC_REV_5785)
14963                         tg3_get_5787_nvram_info(tp);
14964                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14965                         tg3_get_5761_nvram_info(tp);
14966                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14967                         tg3_get_5906_nvram_info(tp);
14968                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14969                          tg3_flag(tp, 57765_CLASS))
14970                         tg3_get_57780_nvram_info(tp);
14971                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14972                          tg3_asic_rev(tp) == ASIC_REV_5719)
14973                         tg3_get_5717_nvram_info(tp);
14974                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14975                          tg3_asic_rev(tp) == ASIC_REV_5762)
14976                         tg3_get_5720_nvram_info(tp);
14977                 else
14978                         tg3_get_nvram_info(tp);
14979
14980                 if (tp->nvram_size == 0)
14981                         tg3_get_nvram_size(tp);
14982
14983                 tg3_disable_nvram_access(tp);
14984                 tg3_nvram_unlock(tp);
14985
14986         } else {
14987                 tg3_flag_clear(tp, NVRAM);
14988                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14989
14990                 tg3_get_eeprom_size(tp);
14991         }
14992 }
14993
14994 struct subsys_tbl_ent {
14995         u16 subsys_vendor, subsys_devid;
14996         u32 phy_id;
14997 };
14998
14999 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15000         /* Broadcom boards. */
15001         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15002           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15003         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15004           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15005         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15006           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15007         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15008           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15009         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15010           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15011         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15012           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15013         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15014           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15015         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15016           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15017         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15018           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15019         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15020           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15021         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15022           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15023
15024         /* 3com boards. */
15025         { TG3PCI_SUBVENDOR_ID_3COM,
15026           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15027         { TG3PCI_SUBVENDOR_ID_3COM,
15028           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15029         { TG3PCI_SUBVENDOR_ID_3COM,
15030           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15031         { TG3PCI_SUBVENDOR_ID_3COM,
15032           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15033         { TG3PCI_SUBVENDOR_ID_3COM,
15034           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15035
15036         /* DELL boards. */
15037         { TG3PCI_SUBVENDOR_ID_DELL,
15038           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15039         { TG3PCI_SUBVENDOR_ID_DELL,
15040           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15041         { TG3PCI_SUBVENDOR_ID_DELL,
15042           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15043         { TG3PCI_SUBVENDOR_ID_DELL,
15044           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15045
15046         /* Compaq boards. */
15047         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15048           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15049         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15050           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15051         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15052           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15053         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15054           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15055         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15056           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15057
15058         /* IBM boards. */
15059         { TG3PCI_SUBVENDOR_ID_IBM,
15060           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15061 };
15062
15063 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15064 {
15065         int i;
15066
15067         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15068                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15069                      tp->pdev->subsystem_vendor) &&
15070                     (subsys_id_to_phy_id[i].subsys_devid ==
15071                      tp->pdev->subsystem_device))
15072                         return &subsys_id_to_phy_id[i];
15073         }
15074         return NULL;
15075 }
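
/* Example: a board with subsystem vendor TG3PCI_SUBVENDOR_ID_3COM and
 * subsystem device TG3PCI_SUBDEVICE_ID_3COM_3C996T matches the 3com
 * entry above, letting the caller fall back to TG3_PHY_ID_BCM5401 when
 * the PHY cannot be probed directly.
 */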
15076
15077 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15078 {
15079         u32 val;
15080
15081         tp->phy_id = TG3_PHY_ID_INVALID;
15082         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15083
15084         /* Assume an onboard, WOL-capable device by default. */
15085         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15086         tg3_flag_set(tp, WOL_CAP);
15087
15088         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15089                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15090                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15091                         tg3_flag_set(tp, IS_NIC);
15092                 }
15093                 val = tr32(VCPU_CFGSHDW);
15094                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15095                         tg3_flag_set(tp, ASPM_WORKAROUND);
15096                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15097                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15098                         tg3_flag_set(tp, WOL_ENABLE);
15099                         device_set_wakeup_enable(&tp->pdev->dev, true);
15100                 }
15101                 goto done;
15102         }
15103
15104         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15105         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15106                 u32 nic_cfg, led_cfg;
15107                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15108                 u32 nic_phy_id, ver, eeprom_phy_id;
15109                 int eeprom_phy_serdes = 0;
15110
15111                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15112                 tp->nic_sram_data_cfg = nic_cfg;
15113
15114                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15115                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15116                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15117                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15118                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15119                     (ver > 0) && (ver < 0x100))
15120                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15121
15122                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15123                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15124
15125                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15126                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15127                     tg3_asic_rev(tp) == ASIC_REV_5720)
15128                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15129
15130                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15131                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15132                         eeprom_phy_serdes = 1;
15133
15134                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15135                 if (nic_phy_id != 0) {
15136                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15137                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15138
15139                         eeprom_phy_id  = (id1 >> 16) << 10;
15140                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15141                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15142                 } else
15143                         eeprom_phy_id = 0;
15144
15145                 tp->phy_id = eeprom_phy_id;
15146                 if (eeprom_phy_serdes) {
15147                         if (!tg3_flag(tp, 5705_PLUS))
15148                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15149                         else
15150                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15151                 }
15152
15153                 if (tg3_flag(tp, 5750_PLUS))
15154                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15155                                     SHASTA_EXT_LED_MODE_MASK);
15156                 else
15157                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15158
15159                 switch (led_cfg) {
15160                 default:
15161                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15162                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15163                         break;
15164
15165                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15166                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15167                         break;
15168
15169                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15170                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15171
15172                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15173                          * read from some older 5700/5701 bootcode.
15174                          */
15175                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15176                             tg3_asic_rev(tp) == ASIC_REV_5701)
15177                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15178
15179                         break;
15180
15181                 case SHASTA_EXT_LED_SHARED:
15182                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15183                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15184                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15185                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15186                                                  LED_CTRL_MODE_PHY_2);
15187
15188                         if (tg3_flag(tp, 5717_PLUS) ||
15189                             tg3_asic_rev(tp) == ASIC_REV_5762)
15190                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15191                                                 LED_CTRL_BLINK_RATE_MASK;
15192
15193                         break;
15194
15195                 case SHASTA_EXT_LED_MAC:
15196                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15197                         break;
15198
15199                 case SHASTA_EXT_LED_COMBO:
15200                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15201                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15202                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15203                                                  LED_CTRL_MODE_PHY_2);
15204                         break;
15205
15206                 }
15207
15208                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15209                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15210                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15211                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15212
15213                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15214                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15215
15216                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15217                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15218                         if ((tp->pdev->subsystem_vendor ==
15219                              PCI_VENDOR_ID_ARIMA) &&
15220                             (tp->pdev->subsystem_device == 0x205a ||
15221                              tp->pdev->subsystem_device == 0x2063))
15222                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15223                 } else {
15224                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15225                         tg3_flag_set(tp, IS_NIC);
15226                 }
15227
15228                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15229                         tg3_flag_set(tp, ENABLE_ASF);
15230                         if (tg3_flag(tp, 5750_PLUS))
15231                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15232                 }
15233
15234                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15235                     tg3_flag(tp, 5750_PLUS))
15236                         tg3_flag_set(tp, ENABLE_APE);
15237
15238                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15239                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15240                         tg3_flag_clear(tp, WOL_CAP);
15241
15242                 if (tg3_flag(tp, WOL_CAP) &&
15243                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15244                         tg3_flag_set(tp, WOL_ENABLE);
15245                         device_set_wakeup_enable(&tp->pdev->dev, true);
15246                 }
15247
15248                 if (cfg2 & (1 << 17))
15249                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15250
15251                 /* SerDes signal pre-emphasis in register 0x590 is set
15252                  * by the bootcode if bit 18 is set. */
15253                 if (cfg2 & (1 << 18))
15254                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15255
15256                 if ((tg3_flag(tp, 57765_PLUS) ||
15257                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15258                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15259                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15260                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15261
15262                 if (tg3_flag(tp, PCI_EXPRESS)) {
15263                         u32 cfg3;
15264
15265                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15266                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15267                             !tg3_flag(tp, 57765_PLUS) &&
15268                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15269                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15270                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15271                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15272                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15273                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15274                 }
15275
15276                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15277                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15278                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15279                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15280                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15281                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15282
15283                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15284                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15285         }
15286 done:
15287         if (tg3_flag(tp, WOL_CAP))
15288                 device_set_wakeup_enable(&tp->pdev->dev,
15289                                          tg3_flag(tp, WOL_ENABLE));
15290         else
15291                 device_set_wakeup_capable(&tp->pdev->dev, false);
15292 }
15293
15294 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15295 {
15296         int i, err;
15297         u32 val2, off = offset * 8;
15298
15299         err = tg3_nvram_lock(tp);
15300         if (err)
15301                 return err;
15302
15303         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15304         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15305                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15306         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15307         udelay(10);
15308
15309         for (i = 0; i < 100; i++) {
15310                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15311                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15312                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15313                         break;
15314                 }
15315                 udelay(10);
15316         }
15317
15318         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15319
15320         tg3_nvram_unlock(tp);
15321         if (val2 & APE_OTP_STATUS_CMD_DONE)
15322                 return 0;
15323
15324         return -EBUSY;
15325 }
15326
15327 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15328 {
15329         int i;
15330         u32 val;
15331
15332         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15333         tw32(OTP_CTRL, cmd);
15334
15335         /* Wait for up to 1 ms for command to execute. */
15336         for (i = 0; i < 100; i++) {
15337                 val = tr32(OTP_STATUS);
15338                 if (val & OTP_STATUS_CMD_DONE)
15339                         break;
15340                 udelay(10);
15341         }
15342
15343         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15344 }
15345
15346 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15347  * configuration is a 32-bit value that straddles the alignment boundary.
15348  * We do two 32-bit reads and then shift and merge the results.
15349  */
15350 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15351 {
15352         u32 bhalf_otp, thalf_otp;
15353
15354         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15355
15356         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15357                 return 0;
15358
15359         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15360
15361         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15362                 return 0;
15363
15364         thalf_otp = tr32(OTP_READ_DATA);
15365
15366         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15367
15368         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15369                 return 0;
15370
15371         bhalf_otp = tr32(OTP_READ_DATA);
15372
15373         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15374 }
15375
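/* Set up the default link configuration: autonegotiation with all
 * speeds the PHY supports.  1000Mb modes are skipped on 10/100-only
 * PHYs (and 1000 half is skipped when NVRAM disables it), and serdes
 * PHYs advertise FIBRE instead of the 10/100 TP modes.
 */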
15376 static void tg3_phy_init_link_config(struct tg3 *tp)
15377 {
15378         u32 adv = ADVERTISED_Autoneg;
15379
15380         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15381                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15382                         adv |= ADVERTISED_1000baseT_Half;
15383                 adv |= ADVERTISED_1000baseT_Full;
15384         }
15385
15386         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15387                 adv |= ADVERTISED_100baseT_Half |
15388                        ADVERTISED_100baseT_Full |
15389                        ADVERTISED_10baseT_Half |
15390                        ADVERTISED_10baseT_Full |
15391                        ADVERTISED_TP;
15392         else
15393                 adv |= ADVERTISED_FIBRE;
15394
15395         tp->link_config.advertising = adv;
15396         tp->link_config.speed = SPEED_UNKNOWN;
15397         tp->link_config.duplex = DUPLEX_UNKNOWN;
15398         tp->link_config.autoneg = AUTONEG_ENABLE;
15399         tp->link_config.active_speed = SPEED_UNKNOWN;
15400         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15401
15402         tp->old_link = -1;
15403 }
15404
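/* Probe the PHY.  Flow control autoneg is on by default and the
 * per-function APE PHY lock is chosen when the APE is enabled.  The
 * PHY ID is read via MII unless ASF/APE firmware owns the PHY, in
 * which case the ID from the EEPROM config (or the hard-coded
 * subsystem table) is used.  EEE capability is flagged on the chip
 * revisions that support it, and the PHY is reset and autonegotiation
 * restarted when no firmware is managing the link.
 */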
15405 static int tg3_phy_probe(struct tg3 *tp)
15406 {
15407         u32 hw_phy_id_1, hw_phy_id_2;
15408         u32 hw_phy_id, hw_phy_id_masked;
15409         int err;
15410
15411         /* flow control autonegotiation is the default behavior */
15412         tg3_flag_set(tp, PAUSE_AUTONEG);
15413         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15414
15415         if (tg3_flag(tp, ENABLE_APE)) {
15416                 switch (tp->pci_fn) {
15417                 case 0:
15418                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15419                         break;
15420                 case 1:
15421                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15422                         break;
15423                 case 2:
15424                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15425                         break;
15426                 case 3:
15427                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15428                         break;
15429                 }
15430         }
15431
15432         if (!tg3_flag(tp, ENABLE_ASF) &&
15433             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15434             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15435                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15436                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15437
15438         if (tg3_flag(tp, USE_PHYLIB))
15439                 return tg3_phy_init(tp);
15440
15441         /* Reading the PHY ID register can conflict with ASF
15442          * firmware access to the PHY hardware.
15443          */
15444         err = 0;
15445         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15446                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15447         } else {
15448                 /* Now read the physical PHY_ID from the chip and verify
15449                  * that it is sane.  If it doesn't look good, we fall back
15450                  * to the hard-coded table-based PHY_ID and, failing
15451                  * that, to the value found in the eeprom area.
15452                  */
15453                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15454                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15455
15456                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15457                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15458                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15459
15460                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15461         }
15462
15463         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15464                 tp->phy_id = hw_phy_id;
15465                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15466                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15467                 else
15468                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15469         } else {
15470                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15471                         /* Do nothing, phy ID already set up in
15472                          * tg3_get_eeprom_hw_cfg().
15473                          */
15474                 } else {
15475                         struct subsys_tbl_ent *p;
15476
15477                         /* No eeprom signature?  Try the hardcoded
15478                          * subsys device table.
15479                          */
15480                         p = tg3_lookup_by_subsys(tp);
15481                         if (p) {
15482                                 tp->phy_id = p->phy_id;
15483                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15484                                 /* So far we have seen the IDs 0xbc050cd0,
15485                                  * 0xbc050f80 and 0xbc050c30 on devices
15486                                  * connected to a BCM4785, and there are
15487                                  * probably more.  For now, just assume that
15488                                  * the phy is supported when it is connected
15489                                  * to an SSB core.
15490                                  */
15491                                 return -ENODEV;
15492                         }
15493
15494                         if (!tp->phy_id ||
15495                             tp->phy_id == TG3_PHY_ID_BCM8002)
15496                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15497                 }
15498         }
15499
15500         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15501             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15502              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15503              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15504              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15505              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15506               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15507              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15508               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15509                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15510
15511                 tp->eee.supported = SUPPORTED_100baseT_Full |
15512                                     SUPPORTED_1000baseT_Full;
15513                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15514                                      ADVERTISED_1000baseT_Full;
15515                 tp->eee.eee_enabled = 1;
15516                 tp->eee.tx_lpi_enabled = 1;
15517                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15518         }
15519
15520         tg3_phy_init_link_config(tp);
15521
15522         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15523             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15524             !tg3_flag(tp, ENABLE_APE) &&
15525             !tg3_flag(tp, ENABLE_ASF)) {
15526                 u32 bmsr, dummy;
15527
15528                 tg3_readphy(tp, MII_BMSR, &bmsr);
15529                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15530                     (bmsr & BMSR_LSTATUS))
15531                         goto skip_phy_reset;
15532
15533                 err = tg3_phy_reset(tp);
15534                 if (err)
15535                         return err;
15536
15537                 tg3_phy_set_wirespeed(tp);
15538
15539                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15540                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15541                                             tp->link_config.flowctrl);
15542
15543                         tg3_writephy(tp, MII_BMCR,
15544                                      BMCR_ANENABLE | BMCR_ANRESTART);
15545                 }
15546         }
15547
15548 skip_phy_reset:
15549         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15550                 err = tg3_init_5401phy_dsp(tp);
15551                 if (err)
15552                         return err;
15553
15554                 err = tg3_init_5401phy_dsp(tp);
15555         }
15556
15557         return err;
15558 }
15559
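/* Pull identification strings out of the PCI VPD.  The read-only VPD
 * section may supply a vendor firmware version (only when the
 * manufacturer ID keyword is "1028", Dell's PCI vendor ID) and the
 * board part number.  Without usable VPD, the part number is derived
 * from the PCI device ID where possible, else set to "none".
 */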
15560 static void tg3_read_vpd(struct tg3 *tp)
15561 {
15562         u8 *vpd_data;
15563         unsigned int block_end, rosize, len;
15564         u32 vpdlen;
15565         int j, i = 0;
15566
15567         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15568         if (!vpd_data)
15569                 goto out_no_vpd;
15570
15571         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15572         if (i < 0)
15573                 goto out_not_found;
15574
15575         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15576         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15577         i += PCI_VPD_LRDT_TAG_SIZE;
15578
15579         if (block_end > vpdlen)
15580                 goto out_not_found;
15581
15582         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15583                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15584         if (j > 0) {
15585                 len = pci_vpd_info_field_size(&vpd_data[j]);
15586
15587                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15588                 if (j + len > block_end || len != 4 ||
15589                     memcmp(&vpd_data[j], "1028", 4))
15590                         goto partno;
15591
15592                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15593                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15594                 if (j < 0)
15595                         goto partno;
15596
15597                 len = pci_vpd_info_field_size(&vpd_data[j]);
15598
15599                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15600                 if (j + len > block_end)
15601                         goto partno;
15602
15603                 if (len >= sizeof(tp->fw_ver))
15604                         len = sizeof(tp->fw_ver) - 1;
15605                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15606                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15607                          &vpd_data[j]);
15608         }
15609
15610 partno:
15611         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15612                                       PCI_VPD_RO_KEYWORD_PARTNO);
15613         if (i < 0)
15614                 goto out_not_found;
15615
15616         len = pci_vpd_info_field_size(&vpd_data[i]);
15617
15618         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15619         if (len > TG3_BPN_SIZE ||
15620             (len + i) > vpdlen)
15621                 goto out_not_found;
15622
15623         memcpy(tp->board_part_number, &vpd_data[i], len);
15624
15625 out_not_found:
15626         kfree(vpd_data);
15627         if (tp->board_part_number[0])
15628                 return;
15629
15630 out_no_vpd:
15631         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15632                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15633                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15634                         strcpy(tp->board_part_number, "BCM5717");
15635                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15636                         strcpy(tp->board_part_number, "BCM5718");
15637                 else
15638                         goto nomatch;
15639         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15640                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15641                         strcpy(tp->board_part_number, "BCM57780");
15642                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15643                         strcpy(tp->board_part_number, "BCM57760");
15644                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15645                         strcpy(tp->board_part_number, "BCM57790");
15646                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15647                         strcpy(tp->board_part_number, "BCM57788");
15648                 else
15649                         goto nomatch;
15650         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15651                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15652                         strcpy(tp->board_part_number, "BCM57761");
15653                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15654                         strcpy(tp->board_part_number, "BCM57765");
15655                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15656                         strcpy(tp->board_part_number, "BCM57781");
15657                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15658                         strcpy(tp->board_part_number, "BCM57785");
15659                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15660                         strcpy(tp->board_part_number, "BCM57791");
15661                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15662                         strcpy(tp->board_part_number, "BCM57795");
15663                 else
15664                         goto nomatch;
15665         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15666                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15667                         strcpy(tp->board_part_number, "BCM57762");
15668                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15669                         strcpy(tp->board_part_number, "BCM57766");
15670                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15671                         strcpy(tp->board_part_number, "BCM57782");
15672                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15673                         strcpy(tp->board_part_number, "BCM57786");
15674                 else
15675                         goto nomatch;
15676         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15677                 strcpy(tp->board_part_number, "BCM95906");
15678         } else {
15679 nomatch:
15680                 strcpy(tp->board_part_number, "none");
15681         }
15682 }
15683
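/* Sanity-check a firmware image header in NVRAM: the first word must
 * carry the 0x0c000000 signature bits and the second must be zero.
 */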
15684 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15685 {
15686         u32 val;
15687
15688         if (tg3_nvram_read(tp, offset, &val) ||
15689             (val & 0xfc000000) != 0x0c000000 ||
15690             tg3_nvram_read(tp, offset + 4, &val) ||
15691             val != 0)
15692                 return 0;
15693
15694         return 1;
15695 }
15696
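/* Append the bootcode version to tp->fw_ver.  Newer images carry a
 * 16-byte printable version string located via the NVRAM header;
 * older images only have packed major/minor fields at
 * TG3_NVM_PTREV_BCVER, which get formatted as "vM.mm".
 */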
15697 static void tg3_read_bc_ver(struct tg3 *tp)
15698 {
15699         u32 val, offset, start, ver_offset;
15700         int i, dst_off;
15701         bool newver = false;
15702
15703         if (tg3_nvram_read(tp, 0xc, &offset) ||
15704             tg3_nvram_read(tp, 0x4, &start))
15705                 return;
15706
15707         offset = tg3_nvram_logical_addr(tp, offset);
15708
15709         if (tg3_nvram_read(tp, offset, &val))
15710                 return;
15711
15712         if ((val & 0xfc000000) == 0x0c000000) {
15713                 if (tg3_nvram_read(tp, offset + 4, &val))
15714                         return;
15715
15716                 if (val == 0)
15717                         newver = true;
15718         }
15719
15720         dst_off = strlen(tp->fw_ver);
15721
15722         if (newver) {
15723                 if (TG3_VER_SIZE - dst_off < 16 ||
15724                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15725                         return;
15726
15727                 offset = offset + ver_offset - start;
15728                 for (i = 0; i < 16; i += 4) {
15729                         __be32 v;
15730                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15731                                 return;
15732
15733                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15734                 }
15735         } else {
15736                 u32 major, minor;
15737
15738                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15739                         return;
15740
15741                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15742                         TG3_NVM_BCVER_MAJSFT;
15743                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15744                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15745                          "v%d.%02d", major, minor);
15746         }
15747 }
15748
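/* Hardware selfboot images keep major/minor version fields in the
 * CFG1 NVRAM word; format them as "sb vM.mm".
 */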
15749 static void tg3_read_hwsb_ver(struct tg3 *tp)
15750 {
15751         u32 val, major, minor;
15752
15753         /* Use native endian representation */
15754         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15755                 return;
15756
15757         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15758                 TG3_NVM_HWSB_CFG1_MAJSFT;
15759         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15760                 TG3_NVM_HWSB_CFG1_MINSFT;
15761
15762         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15763 }
15764
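/* Append the selfboot (format 1) EEPROM version as " vM.mm" plus an
 * optional build letter, decoded from an extended directory header
 * whose offset depends on the selfboot revision.
 */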
15765 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15766 {
15767         u32 offset, major, minor, build;
15768
15769         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15770
15771         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15772                 return;
15773
15774         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15775         case TG3_EEPROM_SB_REVISION_0:
15776                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15777                 break;
15778         case TG3_EEPROM_SB_REVISION_2:
15779                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15780                 break;
15781         case TG3_EEPROM_SB_REVISION_3:
15782                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15783                 break;
15784         case TG3_EEPROM_SB_REVISION_4:
15785                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15786                 break;
15787         case TG3_EEPROM_SB_REVISION_5:
15788                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15789                 break;
15790         case TG3_EEPROM_SB_REVISION_6:
15791                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15792                 break;
15793         default:
15794                 return;
15795         }
15796
15797         if (tg3_nvram_read(tp, offset, &val))
15798                 return;
15799
15800         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15801                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15802         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15803                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15804         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15805
15806         if (minor > 99 || build > 26)
15807                 return;
15808
15809         offset = strlen(tp->fw_ver);
15810         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15811                  " v%d.%02d", major, minor);
15812
15813         if (build > 0) {
15814                 offset = strlen(tp->fw_ver);
15815                 if (offset < TG3_VER_SIZE - 1)
15816                         tp->fw_ver[offset] = 'a' + build - 1;
15817         }
15818 }
15819
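/* Append the management firmware version.  The NVRAM directory is
 * scanned for an ASF_INIT entry, the image it points at is validated
 * with tg3_fw_img_is_valid(), and the version words are copied into
 * tp->fw_ver, clamped to TG3_VER_SIZE.
 */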
15820 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15821 {
15822         u32 val, offset, start;
15823         int i, vlen;
15824
15825         for (offset = TG3_NVM_DIR_START;
15826              offset < TG3_NVM_DIR_END;
15827              offset += TG3_NVM_DIRENT_SIZE) {
15828                 if (tg3_nvram_read(tp, offset, &val))
15829                         return;
15830
15831                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15832                         break;
15833         }
15834
15835         if (offset == TG3_NVM_DIR_END)
15836                 return;
15837
15838         if (!tg3_flag(tp, 5705_PLUS))
15839                 start = 0x08000000;
15840         else if (tg3_nvram_read(tp, offset - 4, &start))
15841                 return;
15842
15843         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15844             !tg3_fw_img_is_valid(tp, offset) ||
15845             tg3_nvram_read(tp, offset + 8, &val))
15846                 return;
15847
15848         offset += val - start;
15849
15850         vlen = strlen(tp->fw_ver);
15851
15852         tp->fw_ver[vlen++] = ',';
15853         tp->fw_ver[vlen++] = ' ';
15854
15855         for (i = 0; i < 4; i++) {
15856                 __be32 v;
15857                 if (tg3_nvram_read_be32(tp, offset, &v))
15858                         return;
15859
15860                 offset += sizeof(v);
15861
15862                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15863                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15864                         break;
15865                 }
15866
15867                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15868                 vlen += sizeof(v);
15869         }
15870 }
15871
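/* Detect NCSI-capable APE firmware: require the APE shared-memory
 * signature and a ready firmware status before trusting the feature
 * bits.
 */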
15872 static void tg3_probe_ncsi(struct tg3 *tp)
15873 {
15874         u32 apedata;
15875
15876         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15877         if (apedata != APE_SEG_SIG_MAGIC)
15878                 return;
15879
15880         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15881         if (!(apedata & APE_FW_STATUS_READY))
15882                 return;
15883
15884         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15885                 tg3_flag_set(tp, APE_HAS_NCSI);
15886 }
15887
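/* Append the APE firmware version, labelled NCSI, SMASH or DASH
 * according to the firmware type.
 */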
15888 static void tg3_read_dash_ver(struct tg3 *tp)
15889 {
15890         int vlen;
15891         u32 apedata;
15892         char *fwtype;
15893
15894         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15895
15896         if (tg3_flag(tp, APE_HAS_NCSI))
15897                 fwtype = "NCSI";
15898         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15899                 fwtype = "SMASH";
15900         else
15901                 fwtype = "DASH";
15902
15903         vlen = strlen(tp->fw_ver);
15904
15905         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15906                  fwtype,
15907                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15908                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15909                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15910                  (apedata & APE_FW_VERSION_BLDMSK));
15911 }
15912
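/* On the 5762, the firmware version may live in OTP rather than
 * NVRAM: read two words, check the magic value, and append the last
 * non-zero version byte as " .NN".
 */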
15913 static void tg3_read_otp_ver(struct tg3 *tp)
15914 {
15915         u32 val, val2;
15916
15917         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15918                 return;
15919
15920         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15921             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15922             TG3_OTP_MAGIC0_VALID(val)) {
15923                 u64 val64 = (u64) val << 32 | val2;
15924                 u32 ver = 0;
15925                 int i, vlen;
15926
15927                 for (i = 0; i < 7; i++) {
15928                         if ((val64 & 0xff) == 0)
15929                                 break;
15930                         ver = val64 & 0xff;
15931                         val64 >>= 8;
15932                 }
15933                 vlen = strlen(tp->fw_ver);
15934                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15935         }
15936 }
15937
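/* Assemble the final firmware version string.  A VPD-derived version
 * stays as the prefix; the NVRAM magic then selects bootcode,
 * selfboot or hardware-selfboot parsing, and DASH/NCSI or management
 * firmware versions are appended when ASF is enabled.
 */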
15938 static void tg3_read_fw_ver(struct tg3 *tp)
15939 {
15940         u32 val;
15941         bool vpd_vers = false;
15942
15943         if (tp->fw_ver[0] != 0)
15944                 vpd_vers = true;
15945
15946         if (tg3_flag(tp, NO_NVRAM)) {
15947                 strcat(tp->fw_ver, "sb");
15948                 tg3_read_otp_ver(tp);
15949                 return;
15950         }
15951
15952         if (tg3_nvram_read(tp, 0, &val))
15953                 return;
15954
15955         if (val == TG3_EEPROM_MAGIC)
15956                 tg3_read_bc_ver(tp);
15957         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15958                 tg3_read_sb_ver(tp, val);
15959         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15960                 tg3_read_hwsb_ver(tp);
15961
15962         if (tg3_flag(tp, ENABLE_ASF)) {
15963                 if (tg3_flag(tp, ENABLE_APE)) {
15964                         tg3_probe_ncsi(tp);
15965                         if (!vpd_vers)
15966                                 tg3_read_dash_ver(tp);
15967                 } else if (!vpd_vers) {
15968                         tg3_read_mgmtfw_ver(tp);
15969                 }
15970         }
15971
15972         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15973 }
15974
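/* Choose the RX return ring size matching the chip family's ring
 * capabilities.
 */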
15975 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15976 {
15977         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15978                 return TG3_RX_RET_MAX_SIZE_5717;
15979         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15980                 return TG3_RX_RET_MAX_SIZE_5700;
15981         else
15982                 return TG3_RX_RET_MAX_SIZE_5705;
15983 }
15984
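/* Host bridges known to reorder posted writes to the mailbox
 * registers; see the MBOX_WRITE_REORDER handling in
 * tg3_get_invariants() below.
 */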
15985 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15986         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15987         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15988         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15989         { },
15990 };
15991
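/* Locate the sibling function of a dual-port device by scanning the
 * other functions in the same slot.  A 5704 in single-port mode has
 * no peer, in which case the device itself is returned.  The
 * pci_get_slot() reference is dropped before returning since one half
 * of the device cannot be removed without the other.
 */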
15992 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15993 {
15994         struct pci_dev *peer;
15995         unsigned int func, devnr = tp->pdev->devfn & ~7;
15996
15997         for (func = 0; func < 8; func++) {
15998                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15999                 if (peer && peer != tp->pdev)
16000                         break;
16001                 pci_dev_put(peer);
16002         }
16003         /* 5704 can be configured in single-port mode, set peer to
16004          * tp->pdev in that case.
16005          */
16006         if (!peer) {
16007                 peer = tp->pdev;
16008                 return peer;
16009         }
16010
16011         /*
16012          * We don't need to keep the refcount elevated; there's no way
16013          * to remove one half of this device without removing the other.
16014          */
16015         pci_dev_put(peer);
16016
16017         return peer;
16018 }
16019
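/* Determine the chip revision ID.  Most chips expose it in the upper
 * bits of TG3PCI_MISC_HOST_CTRL; devices reporting
 * ASIC_REV_USE_PROD_ID_REG there provide it through a product-ID
 * config register instead.  The cumulative family flags (5717_PLUS,
 * 57765_CLASS, 5755_PLUS, 5780_CLASS, ...) are then derived from it.
 */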
16020 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16021 {
16022         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16023         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16024                 u32 reg;
16025
16026                 /* All devices that use the alternate
16027                  * ASIC REV location have a CPMU.
16028                  */
16029                 tg3_flag_set(tp, CPMU_PRESENT);
16030
16031                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16032                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16033                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16034                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16035                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16036                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16037                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16038                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16039                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16040                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16041                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16042                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16043                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16044                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16045                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16046                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16047                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16048                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16049                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16050                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16051                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16052                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16053                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16054                 else
16055                         reg = TG3PCI_PRODID_ASICREV;
16056
16057                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16058         }
16059
16060         /* Wrong chip ID in 5752 A0. This code can be removed later
16061          * as A0 is not in production.
16062          */
16063         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16064                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16065
16066         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16067                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16068
16069         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16070             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16071             tg3_asic_rev(tp) == ASIC_REV_5720)
16072                 tg3_flag_set(tp, 5717_PLUS);
16073
16074         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16075             tg3_asic_rev(tp) == ASIC_REV_57766)
16076                 tg3_flag_set(tp, 57765_CLASS);
16077
16078         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16079              tg3_asic_rev(tp) == ASIC_REV_5762)
16080                 tg3_flag_set(tp, 57765_PLUS);
16081
16082         /* Intentionally exclude ASIC_REV_5906 */
16083         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16084             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16085             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16086             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16087             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16088             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16089             tg3_flag(tp, 57765_PLUS))
16090                 tg3_flag_set(tp, 5755_PLUS);
16091
16092         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16093             tg3_asic_rev(tp) == ASIC_REV_5714)
16094                 tg3_flag_set(tp, 5780_CLASS);
16095
16096         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16097             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16098             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16099             tg3_flag(tp, 5755_PLUS) ||
16100             tg3_flag(tp, 5780_CLASS))
16101                 tg3_flag_set(tp, 5750_PLUS);
16102
16103         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16104             tg3_flag(tp, 5750_PLUS))
16105                 tg3_flag_set(tp, 5705_PLUS);
16106 }
16107
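/* Report whether this device is 10/100-only: certain 5703 board IDs,
 * any FET PHY, or a device-table entry flagged 10_100_ONLY (with an
 * extra board check on the 5705).
 */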
16108 static bool tg3_10_100_only_device(struct tg3 *tp,
16109                                    const struct pci_device_id *ent)
16110 {
16111         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16112
16113         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16114              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16115             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16116                 return true;
16117
16118         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16119                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16120                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16121                                 return true;
16122                 } else {
16123                         return true;
16124                 }
16125         }
16126
16127         return false;
16128 }
16129
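/* One-time discovery of chip capabilities and bus quirks.  Among
 * other things this decides MWI handling, the ICH and PCI-X
 * workarounds, DMA limitations, TSO capabilities, MSI/MSI-X support
 * and the register access methods.
 */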
16130 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16131 {
16132         u32 misc_ctrl_reg;
16133         u32 pci_state_reg, grc_misc_cfg;
16134         u32 val;
16135         u16 pci_cmd;
16136         int err;
16137
16138         /* Force memory write invalidate off.  If we leave it on,
16139          * then on 5700_BX chips we have to enable a workaround.
16140          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16141          * to match the cacheline size.  The Broadcom driver has this
16142          * workaround but turns MWI off all the time, so it never uses
16143          * it.  This seems to suggest that the workaround is insufficient.
16144          */
16145         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16146         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16147         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16148
16149         /* Important! -- Make sure register accesses are byteswapped
16150          * correctly.  Also, for those chips that require it, make
16151          * sure that indirect register accesses are enabled before
16152          * the first operation.
16153          */
16154         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16155                               &misc_ctrl_reg);
16156         tp->misc_host_ctrl |= (misc_ctrl_reg &
16157                                MISC_HOST_CTRL_CHIPREV);
16158         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16159                                tp->misc_host_ctrl);
16160
16161         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16162
16163         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16164          * we need to disable memory and use config. cycles
16165          * only to access all registers. The 5702/03 chips
16166          * can mistakenly decode the special cycles from the
16167          * ICH chipsets as memory write cycles, causing corruption
16168          * of register and memory space. Only certain ICH bridges
16169          * will drive special cycles with non-zero data during the
16170          * address phase which can fall within the 5703's address
16171          * range. This is not an ICH bug as the PCI spec allows
16172          * non-zero address during special cycles. However, only
16173          * these ICH bridges are known to drive non-zero addresses
16174          * during special cycles.
16175          *
16176          * Since special cycles do not cross PCI bridges, we only
16177          * enable this workaround if the 5703 is on the secondary
16178          * bus of these ICH bridges.
16179          */
16180         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16181             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16182                 static struct tg3_dev_id {
16183                         u32     vendor;
16184                         u32     device;
16185                         u32     rev;
16186                 } ich_chipsets[] = {
16187                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16188                           PCI_ANY_ID },
16189                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16190                           PCI_ANY_ID },
16191                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16192                           0xa },
16193                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16194                           PCI_ANY_ID },
16195                         { },
16196                 };
16197                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16198                 struct pci_dev *bridge = NULL;
16199
16200                 while (pci_id->vendor != 0) {
16201                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16202                                                 bridge);
16203                         if (!bridge) {
16204                                 pci_id++;
16205                                 continue;
16206                         }
16207                         if (pci_id->rev != PCI_ANY_ID) {
16208                                 if (bridge->revision > pci_id->rev)
16209                                         continue;
16210                         }
16211                         if (bridge->subordinate &&
16212                             (bridge->subordinate->number ==
16213                              tp->pdev->bus->number)) {
16214                                 tg3_flag_set(tp, ICH_WORKAROUND);
16215                                 pci_dev_put(bridge);
16216                                 break;
16217                         }
16218                 }
16219         }
16220
16221         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16222                 static struct tg3_dev_id {
16223                         u32     vendor;
16224                         u32     device;
16225                 } bridge_chipsets[] = {
16226                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16227                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16228                         { },
16229                 };
16230                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16231                 struct pci_dev *bridge = NULL;
16232
16233                 while (pci_id->vendor != 0) {
16234                         bridge = pci_get_device(pci_id->vendor,
16235                                                 pci_id->device,
16236                                                 bridge);
16237                         if (!bridge) {
16238                                 pci_id++;
16239                                 continue;
16240                         }
16241                         if (bridge->subordinate &&
16242                             (bridge->subordinate->number <=
16243                              tp->pdev->bus->number) &&
16244                             (bridge->subordinate->busn_res.end >=
16245                              tp->pdev->bus->number)) {
16246                                 tg3_flag_set(tp, 5701_DMA_BUG);
16247                                 pci_dev_put(bridge);
16248                                 break;
16249                         }
16250                 }
16251         }
16252
16253         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16254          * DMA addresses > 40-bit.  This bridge may have additional
16255          * 57xx devices behind it in some 4-port NIC designs, for example.
16256          * Any tg3 device found behind the bridge will also need the 40-bit
16257          * DMA workaround.
16258          */
16259         if (tg3_flag(tp, 5780_CLASS)) {
16260                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16261                 tp->msi_cap = tp->pdev->msi_cap;
16262         } else {
16263                 struct pci_dev *bridge = NULL;
16264
16265                 do {
16266                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16267                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16268                                                 bridge);
16269                         if (bridge && bridge->subordinate &&
16270                             (bridge->subordinate->number <=
16271                              tp->pdev->bus->number) &&
16272                             (bridge->subordinate->busn_res.end >=
16273                              tp->pdev->bus->number)) {
16274                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16275                                 pci_dev_put(bridge);
16276                                 break;
16277                         }
16278                 } while (bridge);
16279         }
16280
16281         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16282             tg3_asic_rev(tp) == ASIC_REV_5714)
16283                 tp->pdev_peer = tg3_find_peer(tp);
16284
16285         /* Determine TSO capabilities */
16286         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16287                 ; /* Do nothing. HW bug. */
16288         else if (tg3_flag(tp, 57765_PLUS))
16289                 tg3_flag_set(tp, HW_TSO_3);
16290         else if (tg3_flag(tp, 5755_PLUS) ||
16291                  tg3_asic_rev(tp) == ASIC_REV_5906)
16292                 tg3_flag_set(tp, HW_TSO_2);
16293         else if (tg3_flag(tp, 5750_PLUS)) {
16294                 tg3_flag_set(tp, HW_TSO_1);
16295                 tg3_flag_set(tp, TSO_BUG);
16296                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16297                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16298                         tg3_flag_clear(tp, TSO_BUG);
16299         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16300                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16301                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16302                 tg3_flag_set(tp, FW_TSO);
16303                 tg3_flag_set(tp, TSO_BUG);
16304                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16305                         tp->fw_needed = FIRMWARE_TG3TSO5;
16306                 else
16307                         tp->fw_needed = FIRMWARE_TG3TSO;
16308         }
16309
16310         /* Selectively allow TSO based on operating conditions */
16311         if (tg3_flag(tp, HW_TSO_1) ||
16312             tg3_flag(tp, HW_TSO_2) ||
16313             tg3_flag(tp, HW_TSO_3) ||
16314             tg3_flag(tp, FW_TSO)) {
16315                 /* For firmware TSO, assume ASF is disabled.
16316                  * We'll disable TSO later if we discover ASF
16317                  * is enabled in tg3_get_eeprom_hw_cfg().
16318                  */
16319                 tg3_flag_set(tp, TSO_CAPABLE);
16320         } else {
16321                 tg3_flag_clear(tp, TSO_CAPABLE);
16322                 tg3_flag_clear(tp, TSO_BUG);
16323                 tp->fw_needed = NULL;
16324         }
16325
16326         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16327                 tp->fw_needed = FIRMWARE_TG3;
16328
16329         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16330                 tp->fw_needed = FIRMWARE_TG357766;
16331
16332         tp->irq_max = 1;
16333
16334         if (tg3_flag(tp, 5750_PLUS)) {
16335                 tg3_flag_set(tp, SUPPORT_MSI);
16336                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16337                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16338                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16339                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16340                      tp->pdev_peer == tp->pdev))
16341                         tg3_flag_clear(tp, SUPPORT_MSI);
16342
16343                 if (tg3_flag(tp, 5755_PLUS) ||
16344                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16345                         tg3_flag_set(tp, 1SHOT_MSI);
16346                 }
16347
16348                 if (tg3_flag(tp, 57765_PLUS)) {
16349                         tg3_flag_set(tp, SUPPORT_MSIX);
16350                         tp->irq_max = TG3_IRQ_MAX_VECS;
16351                 }
16352         }
16353
16354         tp->txq_max = 1;
16355         tp->rxq_max = 1;
16356         if (tp->irq_max > 1) {
16357                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16358                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16359
16360                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16361                     tg3_asic_rev(tp) == ASIC_REV_5720)
16362                         tp->txq_max = tp->irq_max - 1;
16363         }
16364
16365         if (tg3_flag(tp, 5755_PLUS) ||
16366             tg3_asic_rev(tp) == ASIC_REV_5906)
16367                 tg3_flag_set(tp, SHORT_DMA_BUG);
16368
16369         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16370                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16371
16372         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16373             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16374             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16375             tg3_asic_rev(tp) == ASIC_REV_5762)
16376                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16377
16378         if (tg3_flag(tp, 57765_PLUS) &&
16379             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16380                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16381
16382         if (!tg3_flag(tp, 5705_PLUS) ||
16383             tg3_flag(tp, 5780_CLASS) ||
16384             tg3_flag(tp, USE_JUMBO_BDFLAG))
16385                 tg3_flag_set(tp, JUMBO_CAPABLE);
16386
16387         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16388                               &pci_state_reg);
16389
16390         if (pci_is_pcie(tp->pdev)) {
16391                 u16 lnkctl;
16392
16393                 tg3_flag_set(tp, PCI_EXPRESS);
16394
16395                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16396                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16397                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16398                                 tg3_flag_clear(tp, HW_TSO_2);
16399                                 tg3_flag_clear(tp, TSO_CAPABLE);
16400                         }
16401                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16402                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16403                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16404                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16405                                 tg3_flag_set(tp, CLKREQ_BUG);
16406                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16407                         tg3_flag_set(tp, L1PLLPD_EN);
16408                 }
16409         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16410                 /* BCM5785 devices are effectively PCIe devices, and should
16411                  * follow PCIe codepaths, but do not have a PCIe capabilities
16412                  * section.
16413                  */
16414                 tg3_flag_set(tp, PCI_EXPRESS);
16415         } else if (!tg3_flag(tp, 5705_PLUS) ||
16416                    tg3_flag(tp, 5780_CLASS)) {
16417                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16418                 if (!tp->pcix_cap) {
16419                         dev_err(&tp->pdev->dev,
16420                                 "Cannot find PCI-X capability, aborting\n");
16421                         return -EIO;
16422                 }
16423
16424                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16425                         tg3_flag_set(tp, PCIX_MODE);
16426         }
16427
16428         /* If we have an AMD 762 or VIA K8T800 chipset, write
16429          * reordering to the mailbox registers done by the host
16430          * controller can cause major trouble.  We read back from
16431          * every mailbox register write to force the writes to be
16432          * posted to the chip in order.
16433          */
16434         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16435             !tg3_flag(tp, PCI_EXPRESS))
16436                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16437
16438         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16439                              &tp->pci_cacheline_sz);
16440         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16441                              &tp->pci_lat_timer);
16442         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16443             tp->pci_lat_timer < 64) {
16444                 tp->pci_lat_timer = 64;
16445                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16446                                       tp->pci_lat_timer);
16447         }
16448
16449         /* Important! -- It is critical that the PCI-X hw workaround
16450          * situation is decided before the first MMIO register access.
16451          */
16452         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16453                 /* 5700 BX chips need to have their TX producer index
16454                  * mailboxes written twice to work around a bug.
16455                  */
16456                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16457
16458                 /* If we are in PCI-X mode, enable register write workaround.
16459                  *
16460                  * The workaround is to use indirect register accesses
16461                  * for all chip writes not to mailbox registers.
16462                  */
16463                 if (tg3_flag(tp, PCIX_MODE)) {
16464                         u32 pm_reg;
16465
16466                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16467
16468                         /* The chip can have its power management PCI config
16469                          * space registers clobbered due to this bug.
16470                          * So explicitly force the chip into D0 here.
16471                          */
16472                         pci_read_config_dword(tp->pdev,
16473                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16474                                               &pm_reg);
16475                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16476                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16477                         pci_write_config_dword(tp->pdev,
16478                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16479                                                pm_reg);
16480
16481                         /* Also, force SERR#/PERR# in PCI command. */
16482                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16483                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16484                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16485                 }
16486         }
16487
16488         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16489                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16490         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16491                 tg3_flag_set(tp, PCI_32BIT);
16492
16493         /* Chip-specific fixup from Broadcom driver */
16494         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16495             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16496                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16497                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16498         }
16499
16500         /* Default fast path register access methods */
16501         tp->read32 = tg3_read32;
16502         tp->write32 = tg3_write32;
16503         tp->read32_mbox = tg3_read32;
16504         tp->write32_mbox = tg3_write32;
16505         tp->write32_tx_mbox = tg3_write32;
16506         tp->write32_rx_mbox = tg3_write32;
16507
16508         /* Various workaround register access methods */
16509         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16510                 tp->write32 = tg3_write_indirect_reg32;
16511         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16512                  (tg3_flag(tp, PCI_EXPRESS) &&
16513                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16514                 /*
16515                  * Back-to-back register writes can cause problems on these
16516                  * chips; the workaround is to read back all reg writes
16517                  * except those to mailbox regs.
16518                  *
16519                  * See tg3_write_indirect_reg32().
16520                  */
16521                 tp->write32 = tg3_write_flush_reg32;
16522         }
16523
16524         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16525                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16526                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16527                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16528         }
16529
16530         if (tg3_flag(tp, ICH_WORKAROUND)) {
16531                 tp->read32 = tg3_read_indirect_reg32;
16532                 tp->write32 = tg3_write_indirect_reg32;
16533                 tp->read32_mbox = tg3_read_indirect_mbox;
16534                 tp->write32_mbox = tg3_write_indirect_mbox;
16535                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16536                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16537
16538                 iounmap(tp->regs);
16539                 tp->regs = NULL;
16540
16541                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16542                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16543                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16544         }
16545         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16546                 tp->read32_mbox = tg3_read32_mbox_5906;
16547                 tp->write32_mbox = tg3_write32_mbox_5906;
16548                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16549                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16550         }
16551
16552         if (tp->write32 == tg3_write_indirect_reg32 ||
16553             (tg3_flag(tp, PCIX_MODE) &&
16554              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16555               tg3_asic_rev(tp) == ASIC_REV_5701)))
16556                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16557
16558         /* The memory arbiter has to be enabled in order for SRAM accesses
16559          * to succeed.  Normally on powerup the tg3 chip firmware will make
16560          * sure it is enabled, but other entities such as system netboot
16561          * code might disable it.
16562          */
16563         val = tr32(MEMARB_MODE);
16564         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16565
16566         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16567         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16568             tg3_flag(tp, 5780_CLASS)) {
16569                 if (tg3_flag(tp, PCIX_MODE)) {
16570                         pci_read_config_dword(tp->pdev,
16571                                               tp->pcix_cap + PCI_X_STATUS,
16572                                               &val);
16573                         tp->pci_fn = val & 0x7;
16574                 }
16575         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16576                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16577                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16578                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16579                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16580                         val = tr32(TG3_CPMU_STATUS);
16581
16582                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16583                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16584                 else
16585                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16586                                      TG3_CPMU_STATUS_FSHFT_5719;
16587         }
16588
16589         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16590                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16591                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16592         }
16593
16594         /* Get eeprom hw config before calling tg3_set_power_state().
16595          * In particular, the TG3_FLAG_IS_NIC flag must be
16596          * determined before calling tg3_set_power_state() so that
16597          * we know whether or not to switch out of Vaux power.
16598          * When the flag is set, it means that GPIO1 is used for eeprom
16599          * write protect and also implies that it is a LOM where GPIOs
16600          * are not used to switch power.
16601          */
16602         tg3_get_eeprom_hw_cfg(tp);
16603
16604         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16605                 tg3_flag_clear(tp, TSO_CAPABLE);
16606                 tg3_flag_clear(tp, TSO_BUG);
16607                 tp->fw_needed = NULL;
16608         }
16609
16610         if (tg3_flag(tp, ENABLE_APE)) {
16611                 /* Allow reads and writes to the
16612                  * APE register and memory space.
16613                  */
16614                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16615                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16616                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16617                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16618                                        pci_state_reg);
16619
16620                 tg3_ape_lock_init(tp);
16621         }
16622
16623         /* Set up tp->grc_local_ctrl before calling
16624          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16625          * will bring 5700's external PHY out of reset.
16626          * It is also used as eeprom write protect on LOMs.
16627          */
16628         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16629         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16630             tg3_flag(tp, EEPROM_WRITE_PROT))
16631                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16632                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16633         /* Unused GPIO3 must be driven as output on 5752 because there
16634          * are no pull-up resistors on unused GPIO pins.
16635          */
16636         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16637                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16638
16639         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16640             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16641             tg3_flag(tp, 57765_CLASS))
16642                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16643
16644         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16645             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16646                 /* Turn off the debug UART. */
16647                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16648                 if (tg3_flag(tp, IS_NIC))
16649                         /* Keep VMain power. */
16650                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16651                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16652         }
16653
16654         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16655                 tp->grc_local_ctrl |=
16656                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16657
16658         /* Switch out of Vaux if it is a NIC */
16659         tg3_pwrsrc_switch_to_vmain(tp);
16660
16661         /* Derive initial jumbo mode from MTU assigned in
16662          * ether_setup() via the alloc_etherdev() call
16663          */
16664         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16665                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16666
16667         /* Determine WakeOnLan speed to use. */
16668         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16669             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16670             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16671             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16672                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16673         } else {
16674                 tg3_flag_set(tp, WOL_SPEED_100MB);
16675         }
16676
16677         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16678                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16679
16680         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16681         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16682             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16683              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16684              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16685             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16686             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16687                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16688
16689         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16690             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16691                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16692         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16693                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16694
16695         if (tg3_flag(tp, 5705_PLUS) &&
16696             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16697             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16698             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16699             !tg3_flag(tp, 57765_PLUS)) {
16700                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16701                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16702                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16703                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16704                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16705                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16706                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16707                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16708                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16709                 } else
16710                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16711         }
16712
16713         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16714             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16715                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16716                 if (tp->phy_otp == 0)
16717                         tp->phy_otp = TG3_OTP_DEFAULT;
16718         }
16719
16720         if (tg3_flag(tp, CPMU_PRESENT))
16721                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16722         else
16723                 tp->mi_mode = MAC_MI_MODE_BASE;
16724
16725         tp->coalesce_mode = 0;
16726         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16727             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16728                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16729
16730         /* Set these bits to enable statistics workaround. */
16731         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16732             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16733             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16734             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16735                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16736                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16737         }
16738
16739         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16740             tg3_asic_rev(tp) == ASIC_REV_57780)
16741                 tg3_flag_set(tp, USE_PHYLIB);
16742
16743         err = tg3_mdio_init(tp);
16744         if (err)
16745                 return err;
16746
16747         /* Initialize data/descriptor byte/word swapping. */
16748         val = tr32(GRC_MODE);
16749         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16750             tg3_asic_rev(tp) == ASIC_REV_5762)
16751                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16752                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16753                         GRC_MODE_B2HRX_ENABLE |
16754                         GRC_MODE_HTX2B_ENABLE |
16755                         GRC_MODE_HOST_STACKUP);
16756         else
16757                 val &= GRC_MODE_HOST_STACKUP;
16758
16759         tw32(GRC_MODE, val | tp->grc_mode);
16760
16761         tg3_switch_clocks(tp);
16762
16763         /* Clear this out for sanity. */
16764         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16765
16766         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16767         tw32(TG3PCI_REG_BASE_ADDR, 0);
16768
16769         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16770                               &pci_state_reg);
16771         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16772             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16773                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16774                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16775                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16776                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16777                         void __iomem *sram_base;
16778
16779                         /* Write some dummy words into the SRAM status block
16780                          * area, see if it reads back correctly.  If the return
16781                          * value is bad, force enable the PCIX workaround.
16782                          */
16783                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16784
16785                         writel(0x00000000, sram_base);
16786                         writel(0x00000000, sram_base + 4);
16787                         writel(0xffffffff, sram_base + 4);
16788                         if (readl(sram_base) != 0x00000000)
16789                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16790                 }
16791         }
16792
16793         udelay(50);
16794         tg3_nvram_init(tp);
16795
16796         /* If the device has an NVRAM, no need to load patch firmware */
16797         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16798             !tg3_flag(tp, NO_NVRAM))
16799                 tp->fw_needed = NULL;
16800
16801         grc_misc_cfg = tr32(GRC_MISC_CFG);
16802         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16803
16804         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16805             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16806              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16807                 tg3_flag_set(tp, IS_5788);
16808
16809         if (!tg3_flag(tp, IS_5788) &&
16810             tg3_asic_rev(tp) != ASIC_REV_5700)
16811                 tg3_flag_set(tp, TAGGED_STATUS);
16812         if (tg3_flag(tp, TAGGED_STATUS)) {
16813                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16814                                       HOSTCC_MODE_CLRTICK_TXBD);
16815
16816                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16817                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16818                                        tp->misc_host_ctrl);
16819         }
16820
16821         /* Preserve the APE MAC_MODE bits */
16822         if (tg3_flag(tp, ENABLE_APE))
16823                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16824         else
16825                 tp->mac_mode = 0;
16826
16827         if (tg3_10_100_only_device(tp, ent))
16828                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16829
16830         err = tg3_phy_probe(tp);
16831         if (err) {
16832                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16833                 /* ... but do not return immediately ... */
16834                 tg3_mdio_fini(tp);
16835         }
16836
16837         tg3_read_vpd(tp);
16838         tg3_read_fw_ver(tp);
16839
16840         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16841                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16842         } else {
16843                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16844                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16845                 else
16846                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16847         }
16848
16849         /* 5700 {AX,BX} chips have a broken status block link
16850          * change bit implementation, so we must use the
16851          * status register in those cases.
16852          */
16853         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16854                 tg3_flag_set(tp, USE_LINKCHG_REG);
16855         else
16856                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16857
16858         /* The led_ctrl is set during tg3_phy_probe; here we might
16859          * have to force the link status polling mechanism based
16860          * upon subsystem IDs.
16861          */
16862         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16863             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16864             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16865                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16866                 tg3_flag_set(tp, USE_LINKCHG_REG);
16867         }
16868
16869         /* For all SERDES we poll the MAC status register. */
16870         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16871                 tg3_flag_set(tp, POLL_SERDES);
16872         else
16873                 tg3_flag_clear(tp, POLL_SERDES);
16874
16875         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16876                 tg3_flag_set(tp, POLL_CPMU_LINK);
16877
16878         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16879         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
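        /* The 5701 in PCI-X mode cannot DMA rx buffers to 2-byte-aligned
         * addresses, so drop the NET_IP_ALIGN offset there and, on
         * platforms without efficient unaligned access, copy every
         * received frame into a fresh (aligned) skb.
         */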
16880         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16881             tg3_flag(tp, PCIX_MODE)) {
16882                 tp->rx_offset = NET_SKB_PAD;
16883 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16884                 tp->rx_copy_thresh = ~(u16)0;
16885 #endif
16886         }
16887
16888         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16889         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16890         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16891
16892         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16893
16894         /* Increment the rx prod index on the rx std ring by at most
16895          * 8 for these chips to work around hw errata.
16896          */
16897         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16898             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16899             tg3_asic_rev(tp) == ASIC_REV_5755)
16900                 tp->rx_std_max_post = 8;
16901
16902         if (tg3_flag(tp, ASPM_WORKAROUND))
16903                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16904                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16905
16906         return err;
16907 }
16908
16909 #ifdef CONFIG_SPARC
16910 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16911 {
16912         struct net_device *dev = tp->dev;
16913         struct pci_dev *pdev = tp->pdev;
16914         struct device_node *dp = pci_device_to_OF_node(pdev);
16915         const unsigned char *addr;
16916         int len;
16917
16918         addr = of_get_property(dp, "local-mac-address", &len);
16919         if (addr && len == ETH_ALEN) {
16920                 memcpy(dev->dev_addr, addr, ETH_ALEN);
16921                 return 0;
16922         }
16923         return -ENODEV;
16924 }
16925
16926 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16927 {
16928         struct net_device *dev = tp->dev;
16929
16930         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16931         return 0;
16932 }
16933 #endif
16934
16935 static int tg3_get_device_address(struct tg3 *tp)
16936 {
16937         struct net_device *dev = tp->dev;
16938         u32 hi, lo, mac_offset;
16939         int addr_ok = 0;
16940         int err;
16941
16942 #ifdef CONFIG_SPARC
16943         if (!tg3_get_macaddr_sparc(tp))
16944                 return 0;
16945 #endif
16946
16947         if (tg3_flag(tp, IS_SSB_CORE)) {
16948                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16949                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16950                         return 0;
16951         }
16952
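        /* 0x7c is the default NVRAM offset of the MAC address; the
         * second port/function and the 5906 keep it elsewhere, so adjust
         * the offset before the NVRAM fallback below.
         */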
16953         mac_offset = 0x7c;
16954         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16955             tg3_flag(tp, 5780_CLASS)) {
16956                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16957                         mac_offset = 0xcc;
16958                 if (tg3_nvram_lock(tp))
16959                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16960                 else
16961                         tg3_nvram_unlock(tp);
16962         } else if (tg3_flag(tp, 5717_PLUS)) {
16963                 if (tp->pci_fn & 1)
16964                         mac_offset = 0xcc;
16965                 if (tp->pci_fn > 1)
16966                         mac_offset += 0x18c;
16967         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16968                 mac_offset = 0x10;
16969
16970         /* First try to get it from MAC address mailbox. */
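        /* Bootcode leaves an ASCII "HK" (0x484b) signature in the upper
         * half of the mailbox when it has stored a valid address there.
         */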
16971         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16972         if ((hi >> 16) == 0x484b) {
16973                 dev->dev_addr[0] = (hi >>  8) & 0xff;
16974                 dev->dev_addr[1] = (hi >>  0) & 0xff;
16975
16976                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16977                 dev->dev_addr[2] = (lo >> 24) & 0xff;
16978                 dev->dev_addr[3] = (lo >> 16) & 0xff;
16979                 dev->dev_addr[4] = (lo >>  8) & 0xff;
16980                 dev->dev_addr[5] = (lo >>  0) & 0xff;
16981
16982                 /* Some old bootcode may report a 0 MAC address in SRAM */
16983                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16984         }
16985         if (!addr_ok) {
16986                 /* Next, try NVRAM. */
16987                 if (!tg3_flag(tp, NO_NVRAM) &&
16988                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16989                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16990                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16991                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16992                 }
16993                 /* Finally just fetch it out of the MAC control regs. */
16994                 else {
16995                         hi = tr32(MAC_ADDR_0_HIGH);
16996                         lo = tr32(MAC_ADDR_0_LOW);
16997
16998                         dev->dev_addr[5] = lo & 0xff;
16999                         dev->dev_addr[4] = (lo >> 8) & 0xff;
17000                         dev->dev_addr[3] = (lo >> 16) & 0xff;
17001                         dev->dev_addr[2] = (lo >> 24) & 0xff;
17002                         dev->dev_addr[1] = hi & 0xff;
17003                         dev->dev_addr[0] = (hi >> 8) & 0xff;
17004                 }
17005         }
17006
17007         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17008 #ifdef CONFIG_SPARC
17009                 if (!tg3_get_default_macaddr_sparc(tp))
17010                         return 0;
17011 #endif
17012                 return -EINVAL;
17013         }
17014         return 0;
17015 }
17016
17017 #define BOUNDARY_SINGLE_CACHELINE       1
17018 #define BOUNDARY_MULTI_CACHELINE        2
17019
17020 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17021 {
17022         int cacheline_size;
17023         u8 byte;
17024         int goal;
17025
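        /* PCI_CACHE_LINE_SIZE is in units of 32-bit words; treat an
         * unset (zero) value as the 1024-byte worst case.
         */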
17026         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17027         if (byte == 0)
17028                 cacheline_size = 1024;
17029         else
17030                 cacheline_size = (int) byte * 4;
17031
17032         /* On 5703 and later chips, the boundary bits have no
17033          * effect.
17034          */
17035         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17036             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17037             !tg3_flag(tp, PCI_EXPRESS))
17038                 goto out;
17039
17040 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17041         goal = BOUNDARY_MULTI_CACHELINE;
17042 #else
17043 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17044         goal = BOUNDARY_SINGLE_CACHELINE;
17045 #else
17046         goal = 0;
17047 #endif
17048 #endif
17049
17050         if (tg3_flag(tp, 57765_PLUS)) {
17051                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17052                 goto out;
17053         }
17054
17055         if (!goal)
17056                 goto out;
17057
17058         /* PCI controllers on most RISC systems tend to disconnect
17059          * when a device tries to burst across a cache-line boundary.
17060          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17061          *
17062          * Unfortunately, for PCI-E there are only limited
17063          * write-side controls for this, and thus for reads
17064          * we will still get the disconnects.  We'll also waste
17065          * these PCI cycles for both read and write for chips
17066          * other than 5700 and 5701 which do not implement the
17067          * boundary bits.
17068          */
17069         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17070                 switch (cacheline_size) {
17071                 case 16:
17072                 case 32:
17073                 case 64:
17074                 case 128:
17075                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17076                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17077                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17078                         } else {
17079                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17080                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17081                         }
17082                         break;
17083
17084                 case 256:
17085                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17086                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17087                         break;
17088
17089                 default:
17090                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17091                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17092                         break;
17093                 }
17094         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17095                 switch (cacheline_size) {
17096                 case 16:
17097                 case 32:
17098                 case 64:
17099                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17100                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17101                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17102                                 break;
17103                         }
17104                         /* fallthrough */
17105                 case 128:
17106                 default:
17107                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17108                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17109                         break;
17110                 }
17111         } else {
17112                 switch (cacheline_size) {
17113                 case 16:
17114                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17115                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17116                                         DMA_RWCTRL_WRITE_BNDRY_16);
17117                                 break;
17118                         }
17119                         /* fallthrough */
17120                 case 32:
17121                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17122                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17123                                         DMA_RWCTRL_WRITE_BNDRY_32);
17124                                 break;
17125                         }
17126                         /* fallthrough */
17127                 case 64:
17128                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17129                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17130                                         DMA_RWCTRL_WRITE_BNDRY_64);
17131                                 break;
17132                         }
17133                         /* fallthrough */
17134                 case 128:
17135                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17136                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17137                                         DMA_RWCTRL_WRITE_BNDRY_128);
17138                                 break;
17139                         }
17140                         /* fallthrough */
17141                 case 256:
17142                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17143                                 DMA_RWCTRL_WRITE_BNDRY_256);
17144                         break;
17145                 case 512:
17146                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17147                                 DMA_RWCTRL_WRITE_BNDRY_512);
17148                         break;
17149                 case 1024:
17150                 default:
17151                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17152                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17153                         break;
17154                 }
17155         }
17156
17157 out:
17158         return val;
17159 }
17160
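/* Push one test buffer through the chip's DMA engine, with to_device
 * selecting the direction: an internal buffer descriptor is written
 * into NIC SRAM through the PCI memory window, the matching FTQ is
 * kicked, and the completion FIFO is polled for the result.
 */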
17161 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17162                            int size, bool to_device)
17163 {
17164         struct tg3_internal_buffer_desc test_desc;
17165         u32 sram_dma_descs;
17166         int i, ret;
17167
17168         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17169
17170         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17171         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17172         tw32(RDMAC_STATUS, 0);
17173         tw32(WDMAC_STATUS, 0);
17174
17175         tw32(BUFMGR_MODE, 0);
17176         tw32(FTQ_RESET, 0);
17177
17178         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17179         test_desc.addr_lo = buf_dma & 0xffffffff;
17180         test_desc.nic_mbuf = 0x00002100;
17181         test_desc.len = size;
17182
17183         /*
17184          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17185          * the *second* time the tg3 driver was loaded after an
17186          * initial scan.
17187          *
17188          * Broadcom tells me:
17189          *   ...the DMA engine is connected to the GRC block and a DMA
17190          *   reset may affect the GRC block in some unpredictable way...
17191          *   The behavior of resets to individual blocks has not been tested.
17192          *
17193          * Broadcom noted the GRC reset will also reset all sub-components.
17194          */
17195         if (to_device) {
17196                 test_desc.cqid_sqid = (13 << 8) | 2;
17197
17198                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17199                 udelay(40);
17200         } else {
17201                 test_desc.cqid_sqid = (16 << 8) | 7;
17202
17203                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17204                 udelay(40);
17205         }
17206         test_desc.flags = 0x00000005;
17207
17208         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17209                 u32 val;
17210
17211                 val = *(((u32 *)&test_desc) + i);
17212                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17213                                        sram_dma_descs + (i * sizeof(u32)));
17214                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17215         }
17216         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17217
17218         if (to_device)
17219                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17220         else
17221                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17222
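        /* Poll the completion FIFO for up to ~4 ms (40 * 100 us). */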
17223         ret = -ENODEV;
17224         for (i = 0; i < 40; i++) {
17225                 u32 val;
17226
17227                 if (to_device)
17228                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17229                 else
17230                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17231                 if ((val & 0xffff) == sram_dma_descs) {
17232                         ret = 0;
17233                         break;
17234                 }
17235
17236                 udelay(100);
17237         }
17238
17239         return ret;
17240 }
17241
17242 #define TEST_BUFFER_SIZE        0x2000
17243
17244 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17245         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17246         { },
17247 };
17248
17249 static int tg3_test_dma(struct tg3 *tp)
17250 {
17251         dma_addr_t buf_dma;
17252         u32 *buf, saved_dma_rwctrl;
17253         int ret = 0;
17254
17255         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17256                                  &buf_dma, GFP_KERNEL);
17257         if (!buf) {
17258                 ret = -ENOMEM;
17259                 goto out_nofree;
17260         }
17261
17262         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17263                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17264
17265         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17266
17267         if (tg3_flag(tp, 57765_PLUS))
17268                 goto out;
17269
17270         if (tg3_flag(tp, PCI_EXPRESS)) {
17271                 /* DMA read watermark not used on PCIE */
17272                 tp->dma_rwctrl |= 0x00180000;
17273         } else if (!tg3_flag(tp, PCIX_MODE)) {
17274                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17275                     tg3_asic_rev(tp) == ASIC_REV_5750)
17276                         tp->dma_rwctrl |= 0x003f0000;
17277                 else
17278                         tp->dma_rwctrl |= 0x003f000f;
17279         } else {
17280                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17281                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17282                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17283                         u32 read_water = 0x7;
17284
17285                         /* If the 5704 is behind the EPB bridge, we can
17286                          * do the less restrictive ONE_DMA workaround for
17287                          * better performance.
17288                          */
17289                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17290                             tg3_asic_rev(tp) == ASIC_REV_5704)
17291                                 tp->dma_rwctrl |= 0x8000;
17292                         else if (ccval == 0x6 || ccval == 0x7)
17293                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17294
17295                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17296                                 read_water = 4;
17297                         /* Set bit 23 to enable PCIX hw bug fix */
17298                         tp->dma_rwctrl |=
17299                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17300                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17301                                 (1 << 23);
17302                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17303                         /* 5780 always in PCIX mode */
17304                         tp->dma_rwctrl |= 0x00144000;
17305                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17306                         /* 5714 always in PCIX mode */
17307                         tp->dma_rwctrl |= 0x00148000;
17308                 } else {
17309                         tp->dma_rwctrl |= 0x001b000f;
17310                 }
17311         }
17312         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17313                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17314
17315         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17316             tg3_asic_rev(tp) == ASIC_REV_5704)
17317                 tp->dma_rwctrl &= 0xfffffff0;
17318
17319         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17320             tg3_asic_rev(tp) == ASIC_REV_5701) {
17321                 /* Remove this if it causes problems for some boards. */
17322                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17323
17324                 /* On 5700/5701 chips, we need to set this bit.
17325                  * Otherwise the chip will issue cacheline transactions
17326                  * to streamable DMA memory without all of the byte
17327                  * enables turned on.  This is an error on several
17328                  * RISC PCI controllers, in particular sparc64.
17329                  *
17330                  * On 5703/5704 chips, this bit has been reassigned
17331                  * a different meaning.  In particular, it is used
17332                  * on those chips to enable a PCI-X workaround.
17333                  */
17334                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17335         }
17336
17337         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17338
17339
17340         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17341             tg3_asic_rev(tp) != ASIC_REV_5701)
17342                 goto out;
17343
17344         /* It is best to perform DMA test with maximum write burst size
17345          * to expose the 5700/5701 write DMA bug.
17346          */
17347         saved_dma_rwctrl = tp->dma_rwctrl;
17348         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17349         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17350
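        /* DMA a counting pattern to the chip and back; on corruption,
         * drop to a 16-byte write boundary and retry.
         */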
17351         while (1) {
17352                 u32 *p = buf, i;
17353
17354                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17355                         p[i] = i;
17356
17357                 /* Send the buffer to the chip. */
17358                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17359                 if (ret) {
17360                         dev_err(&tp->pdev->dev,
17361                                 "%s: Buffer write failed. err = %d\n",
17362                                 __func__, ret);
17363                         break;
17364                 }
17365
17366                 /* Now read it back. */
17367                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17368                 if (ret) {
17369                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17370                                 "err = %d\n", __func__, ret);
17371                         break;
17372                 }
17373
17374                 /* Verify it. */
17375                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17376                         if (p[i] == i)
17377                                 continue;
17378
17379                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17380                             DMA_RWCTRL_WRITE_BNDRY_16) {
17381                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17382                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17383                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17384                                 break;
17385                         } else {
17386                                 dev_err(&tp->pdev->dev,
17387                                         "%s: Buffer corrupted on read back! "
17388                                         "(%d != %d)\n", __func__, p[i], i);
17389                                 ret = -ENODEV;
17390                                 goto out;
17391                         }
17392                 }
17393
17394                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17395                         /* Success. */
17396                         ret = 0;
17397                         break;
17398                 }
17399         }
17400         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17401             DMA_RWCTRL_WRITE_BNDRY_16) {
17402                 /* DMA test passed without adjusting DMA boundary,
17403                  * now look for chipsets that are known to expose the
17404                  * DMA bug without failing the test.
17405                  */
17406                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17407                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17408                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17409                 } else {
17410                         /* Safe to use the calculated DMA boundary. */
17411                         tp->dma_rwctrl = saved_dma_rwctrl;
17412                 }
17413
17414                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17415         }
17416
17417 out:
17418         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17419 out_nofree:
17420         return ret;
17421 }
17422
17423 static void tg3_init_bufmgr_config(struct tg3 *tp)
17424 {
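        /* Select mbuf/DMA watermark defaults by chip family; the _jumbo
         * values apply when jumbo rings are in use.
         */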
17425         if (tg3_flag(tp, 57765_PLUS)) {
17426                 tp->bufmgr_config.mbuf_read_dma_low_water =
17427                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17428                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17429                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17430                 tp->bufmgr_config.mbuf_high_water =
17431                         DEFAULT_MB_HIGH_WATER_57765;
17432
17433                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17434                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17435                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17436                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17437                 tp->bufmgr_config.mbuf_high_water_jumbo =
17438                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17439         } else if (tg3_flag(tp, 5705_PLUS)) {
17440                 tp->bufmgr_config.mbuf_read_dma_low_water =
17441                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17442                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17443                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17444                 tp->bufmgr_config.mbuf_high_water =
17445                         DEFAULT_MB_HIGH_WATER_5705;
17446                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17447                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17448                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17449                         tp->bufmgr_config.mbuf_high_water =
17450                                 DEFAULT_MB_HIGH_WATER_5906;
17451                 }
17452
17453                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17454                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17455                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17456                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17457                 tp->bufmgr_config.mbuf_high_water_jumbo =
17458                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17459         } else {
17460                 tp->bufmgr_config.mbuf_read_dma_low_water =
17461                         DEFAULT_MB_RDMA_LOW_WATER;
17462                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17463                         DEFAULT_MB_MACRX_LOW_WATER;
17464                 tp->bufmgr_config.mbuf_high_water =
17465                         DEFAULT_MB_HIGH_WATER;
17466
17467                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17468                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17469                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17470                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17471                 tp->bufmgr_config.mbuf_high_water_jumbo =
17472                         DEFAULT_MB_HIGH_WATER_JUMBO;
17473         }
17474
17475         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17476         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17477 }
17478
17479 static char *tg3_phy_string(struct tg3 *tp)
17480 {
17481         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17482         case TG3_PHY_ID_BCM5400:        return "5400";
17483         case TG3_PHY_ID_BCM5401:        return "5401";
17484         case TG3_PHY_ID_BCM5411:        return "5411";
17485         case TG3_PHY_ID_BCM5701:        return "5701";
17486         case TG3_PHY_ID_BCM5703:        return "5703";
17487         case TG3_PHY_ID_BCM5704:        return "5704";
17488         case TG3_PHY_ID_BCM5705:        return "5705";
17489         case TG3_PHY_ID_BCM5750:        return "5750";
17490         case TG3_PHY_ID_BCM5752:        return "5752";
17491         case TG3_PHY_ID_BCM5714:        return "5714";
17492         case TG3_PHY_ID_BCM5780:        return "5780";
17493         case TG3_PHY_ID_BCM5755:        return "5755";
17494         case TG3_PHY_ID_BCM5787:        return "5787";
17495         case TG3_PHY_ID_BCM5784:        return "5784";
17496         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17497         case TG3_PHY_ID_BCM5906:        return "5906";
17498         case TG3_PHY_ID_BCM5761:        return "5761";
17499         case TG3_PHY_ID_BCM5718C:       return "5718C";
17500         case TG3_PHY_ID_BCM5718S:       return "5718S";
17501         case TG3_PHY_ID_BCM57765:       return "57765";
17502         case TG3_PHY_ID_BCM5719C:       return "5719C";
17503         case TG3_PHY_ID_BCM5720C:       return "5720C";
17504         case TG3_PHY_ID_BCM5762:        return "5762C";
17505         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17506         case 0:                 return "serdes";
17507         default:                return "unknown";
17508         }
17509 }
17510
17511 static char *tg3_bus_string(struct tg3 *tp, char *str)
17512 {
17513         if (tg3_flag(tp, PCI_EXPRESS)) {
17514                 strcpy(str, "PCI Express");
17515                 return str;
17516         } else if (tg3_flag(tp, PCIX_MODE)) {
17517                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17518
17519                 strcpy(str, "PCIX:");
17520
17521                 if ((clock_ctrl == 7) ||
17522                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17523                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17524                         strcat(str, "133MHz");
17525                 else if (clock_ctrl == 0)
17526                         strcat(str, "33MHz");
17527                 else if (clock_ctrl == 2)
17528                         strcat(str, "50MHz");
17529                 else if (clock_ctrl == 4)
17530                         strcat(str, "66MHz");
17531                 else if (clock_ctrl == 6)
17532                         strcat(str, "100MHz");
17533         } else {
17534                 strcpy(str, "PCI:");
17535                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17536                         strcat(str, "66MHz");
17537                 else
17538                         strcat(str, "33MHz");
17539         }
17540         if (tg3_flag(tp, PCI_32BIT))
17541                 strcat(str, ":32-bit");
17542         else
17543                 strcat(str, ":64-bit");
17544         return str;
17545 }
17546
17547 static void tg3_init_coal(struct tg3 *tp)
17548 {
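        /* Fill in the default interrupt coalescing parameters reported
         * (and adjustable) through ethtool.
         */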
17549         struct ethtool_coalesce *ec = &tp->coal;
17550
17551         memset(ec, 0, sizeof(*ec));
17552         ec->cmd = ETHTOOL_GCOALESCE;
17553         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17554         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17555         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17556         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17557         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17558         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17559         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17560         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17561         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17562
17563         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17564                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17565                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17566                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17567                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17568                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17569         }
17570
17571         if (tg3_flag(tp, 5705_PLUS)) {
17572                 ec->rx_coalesce_usecs_irq = 0;
17573                 ec->tx_coalesce_usecs_irq = 0;
17574                 ec->stats_block_coalesce_usecs = 0;
17575         }
17576 }
17577
17578 static int tg3_init_one(struct pci_dev *pdev,
17579                                   const struct pci_device_id *ent)
17580 {
17581         struct net_device *dev;
17582         struct tg3 *tp;
17583         int i, err;
17584         u32 sndmbx, rcvmbx, intmbx;
17585         char str[40];
17586         u64 dma_mask, persist_dma_mask;
17587         netdev_features_t features = 0;
17588
17589         printk_once(KERN_INFO "%s\n", version);
17590
17591         err = pci_enable_device(pdev);
17592         if (err) {
17593                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17594                 return err;
17595         }
17596
17597         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17598         if (err) {
17599                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17600                 goto err_out_disable_pdev;
17601         }
17602
17603         pci_set_master(pdev);
17604
17605         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17606         if (!dev) {
17607                 err = -ENOMEM;
17608                 goto err_out_free_res;
17609         }
17610
17611         SET_NETDEV_DEV(dev, &pdev->dev);
17612
17613         tp = netdev_priv(dev);
17614         tp->pdev = pdev;
17615         tp->dev = dev;
17616         tp->rx_mode = TG3_DEF_RX_MODE;
17617         tp->tx_mode = TG3_DEF_TX_MODE;
17618         tp->irq_sync = 1;
17619         tp->pcierr_recovery = false;
17620
17621         if (tg3_debug > 0)
17622                 tp->msg_enable = tg3_debug;
17623         else
17624                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17625
17626         if (pdev_is_ssb_gige_core(pdev)) {
17627                 tg3_flag_set(tp, IS_SSB_CORE);
17628                 if (ssb_gige_must_flush_posted_writes(pdev))
17629                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17630                 if (ssb_gige_one_dma_at_once(pdev))
17631                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17632                 if (ssb_gige_have_roboswitch(pdev)) {
17633                         tg3_flag_set(tp, USE_PHYLIB);
17634                         tg3_flag_set(tp, ROBOSWITCH);
17635                 }
17636                 if (ssb_gige_is_rgmii(pdev))
17637                         tg3_flag_set(tp, RGMII_MODE);
17638         }
17639
17640         /* The word/byte swap controls here control register access byte
17641          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17642          * setting below.
17643          */
17644         tp->misc_host_ctrl =
17645                 MISC_HOST_CTRL_MASK_PCI_INT |
17646                 MISC_HOST_CTRL_WORD_SWAP |
17647                 MISC_HOST_CTRL_INDIR_ACCESS |
17648                 MISC_HOST_CTRL_PCISTATE_RW;
17649
17650         /* The NONFRM (non-frame) byte/word swap controls take effect
17651          * on descriptor entries, anything which isn't packet data.
17652          *
17653          * The StrongARM chips on the board (one for tx, one for rx)
17654          * are running in big-endian mode.
17655          */
17656         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17657                         GRC_MODE_WSWAP_NONFRM_DATA);
17658 #ifdef __BIG_ENDIAN
17659         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17660 #endif
17661         spin_lock_init(&tp->lock);
17662         spin_lock_init(&tp->indirect_lock);
17663         INIT_WORK(&tp->reset_task, tg3_reset_task);
17664
17665         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17666         if (!tp->regs) {
17667                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17668                 err = -ENOMEM;
17669                 goto err_out_free_dev;
17670         }
17671
17672         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17673             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17674             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17675             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17676             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17677             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17678             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17679             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17680             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17681             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17682             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17683             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17684             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17685             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17686             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17687                 tg3_flag_set(tp, ENABLE_APE);
17688                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17689                 if (!tp->aperegs) {
17690                         dev_err(&pdev->dev,
17691                                 "Cannot map APE registers, aborting\n");
17692                         err = -ENOMEM;
17693                         goto err_out_iounmap;
17694                 }
17695         }
17696
17697         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17698         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17699
17700         dev->ethtool_ops = &tg3_ethtool_ops;
17701         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17702         dev->netdev_ops = &tg3_netdev_ops;
17703         dev->irq = pdev->irq;
17704
17705         err = tg3_get_invariants(tp, ent);
17706         if (err) {
17707                 dev_err(&pdev->dev,
17708                         "Problem fetching invariants of chip, aborting\n");
17709                 goto err_out_apeunmap;
17710         }
17711
17712         /* The EPB bridge inside 5714, 5715, and 5780 and any
17713          * device behind the EPB cannot support DMA addresses > 40-bit.
17714          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17715          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17716          * do DMA address check in tg3_start_xmit().
17717          */
17718         if (tg3_flag(tp, IS_5788))
17719                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17720         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17721                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17722 #ifdef CONFIG_HIGHMEM
17723                 dma_mask = DMA_BIT_MASK(64);
17724 #endif
17725         } else
17726                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17727
17728         /* Configure DMA attributes. */
17729         if (dma_mask > DMA_BIT_MASK(32)) {
17730                 err = pci_set_dma_mask(pdev, dma_mask);
17731                 if (!err) {
17732                         features |= NETIF_F_HIGHDMA;
17733                         err = pci_set_consistent_dma_mask(pdev,
17734                                                           persist_dma_mask);
17735                         if (err < 0) {
17736                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17737                                         "DMA for consistent allocations\n");
17738                                 goto err_out_apeunmap;
17739                         }
17740                 }
17741         }
17742         if (err || dma_mask == DMA_BIT_MASK(32)) {
17743                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17744                 if (err) {
17745                         dev_err(&pdev->dev,
17746                                 "No usable DMA configuration, aborting\n");
17747                         goto err_out_apeunmap;
17748                 }
17749         }
17750
17751         tg3_init_bufmgr_config(tp);
17752
17753         /* 5700 B0 chips do not support checksumming correctly due
17754          * to hardware bugs.
17755          */
17756         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17757                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17758
17759                 if (tg3_flag(tp, 5755_PLUS))
17760                         features |= NETIF_F_IPV6_CSUM;
17761         }
17762
17763         /* TSO is on by default on chips that support hardware TSO.
17764          * Firmware TSO on older chips gives lower performance, so it
17765          * is off by default, but can be enabled using ethtool.
17766          */
17767         if ((tg3_flag(tp, HW_TSO_1) ||
17768              tg3_flag(tp, HW_TSO_2) ||
17769              tg3_flag(tp, HW_TSO_3)) &&
17770             (features & NETIF_F_IP_CSUM))
17771                 features |= NETIF_F_TSO;
17772         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17773                 if (features & NETIF_F_IPV6_CSUM)
17774                         features |= NETIF_F_TSO6;
17775                 if (tg3_flag(tp, HW_TSO_3) ||
17776                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17777                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17778                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17779                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17780                     tg3_asic_rev(tp) == ASIC_REV_57780)
17781                         features |= NETIF_F_TSO_ECN;
17782         }
17783
17784         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17785                          NETIF_F_HW_VLAN_CTAG_RX;
17786         dev->vlan_features |= features;
17787
17788         /*
17789          * Add loopback capability only for a subset of devices that support
17790          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17791          * loopback for the remaining devices.
17792          */
17793         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17794             !tg3_flag(tp, CPMU_PRESENT))
17795                 /* Add the loopback capability */
17796                 features |= NETIF_F_LOOPBACK;
17797
17798         dev->hw_features |= features;
17799         dev->priv_flags |= IFF_UNICAST_FLT;
17800
17801         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17802         dev->min_mtu = TG3_MIN_MTU;
17803         dev->max_mtu = TG3_MAX_MTU(tp);
17804
17805         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17806             !tg3_flag(tp, TSO_CAPABLE) &&
17807             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17808                 tg3_flag_set(tp, MAX_RXPEND_64);
17809                 tp->rx_pending = 63;
17810         }
17811
17812         err = tg3_get_device_address(tp);
17813         if (err) {
17814                 dev_err(&pdev->dev,
17815                         "Could not obtain valid ethernet address, aborting\n");
17816                 goto err_out_apeunmap;
17817         }
17818
17819         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17820         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17821         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
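        /* Hand each NAPI context its interrupt, rx-return consumer and
         * tx producer mailbox offsets.
         */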
17822         for (i = 0; i < tp->irq_max; i++) {
17823                 struct tg3_napi *tnapi = &tp->napi[i];
17824
17825                 tnapi->tp = tp;
17826                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17827
17828                 tnapi->int_mbox = intmbx;
17829                 if (i <= 4)
17830                         intmbx += 0x8;
17831                 else
17832                         intmbx += 0x4;
17833
17834                 tnapi->consmbox = rcvmbx;
17835                 tnapi->prodmbox = sndmbx;
17836
17837                 if (i)
17838                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17839                 else
17840                         tnapi->coal_now = HOSTCC_MODE_NOW;
17841
17842                 if (!tg3_flag(tp, SUPPORT_MSIX))
17843                         break;
17844
17845                 /*
17846                  * If we support MSIX, we'll be using RSS.  If we're using
17847                  * RSS, the first vector only handles link interrupts and the
17848                  * remaining vectors handle rx and tx interrupts.  Reuse the
17849                  * mailbox values for the next iteration.  The values we set up
17850                  * above are still useful for the single vectored mode.
17851                  */
17852                 if (!i)
17853                         continue;
17854
17855                 rcvmbx += 0x8;
17856
17857                 if (sndmbx & 0x4)
17858                         sndmbx -= 0x4;
17859                 else
17860                         sndmbx += 0xc;
17861         }
17862
17863         /*
17864          * Reset the chip in case the UNDI or EFI driver did not shut it
17865          * down cleanly; the DMA self test will enable WDMAC and we'll
17866          * see (spurious) pending DMA on the PCI bus at that point.
17867          */
17868         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17869             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17870                 tg3_full_lock(tp, 0);
17871                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17872                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17873                 tg3_full_unlock(tp);
17874         }
17875
17876         err = tg3_test_dma(tp);
17877         if (err) {
17878                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17879                 goto err_out_apeunmap;
17880         }
17881
17882         tg3_init_coal(tp);
17883
17884         pci_set_drvdata(pdev, dev);
17885
17886         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17887             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17888             tg3_asic_rev(tp) == ASIC_REV_5762)
17889                 tg3_flag_set(tp, PTP_CAPABLE);
17890
17891         tg3_timer_init(tp);
17892
17893         tg3_carrier_off(tp);
17894
17895         err = register_netdev(dev);
17896         if (err) {
17897                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17898                 goto err_out_apeunmap;
17899         }
17900
        if (tg3_flag(tp, PTP_CAPABLE)) {
                tg3_ptp_init(tp);
                tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
                                                   &tp->pdev->dev);
                if (IS_ERR(tp->ptp_clock))
                        tp->ptp_clock = NULL;
        }

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tg3_chip_rev_id(tp),
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d], EEE[%d])\n",
                            tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
                            (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (dev->features & NETIF_F_RXCSUM) != 0,
                    tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    tg3_flag(tp, ENABLE_ASF) != 0,
                    tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

        pci_save_state(pdev);

        return 0;

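/*
 * Error unwind: each label below undoes one more stage of the probe
 * sequence, from the most recently acquired resource back to the first.
 */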
err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        if (pci_is_enabled(pdev))
                pci_disable_device(pdev);
        return err;
}

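/**
 * tg3_remove_one - called when the device is removed or the driver unloads
 * @pdev: Pointer to PCI device
 *
 * Tears the device down in reverse order of probe: PTP, firmware,
 * pending reset work, PHY/MDIO, netdev registration and MMIO mappings.
 */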
static void tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                tg3_ptp_fini(tp);

                release_firmware(tp->fw);

                tg3_reset_task_cancel(tp);

                if (tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }

                unregister_netdev(dev);
                if (tp->aperegs) {
                        iounmap(tp->aperegs);
                        tp->aperegs = NULL;
                }
                if (tp->regs) {
                        iounmap(tp->regs);
                        tp->regs = NULL;
                }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
        }
}

#ifdef CONFIG_PM_SLEEP
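/**
 * tg3_suspend - system-sleep suspend callback
 * @device: the device being suspended
 *
 * Quiesces the interface under the RTNL lock, halts the chip and
 * prepares it for low power.  On failure the hardware is restarted so
 * the device is left usable rather than half suspended.
 */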
static int tg3_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        tg3_reset_task_cancel(tp);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

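        /*
         * Prepare the chip for low power; if that fails, undo the
         * shutdown below so suspend fails with a usable device.
         */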
        err = tg3_power_down_prepare(tp);
        if (err) {
                int err2;

                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, true);
                if (err2)
                        goto out;

                tg3_timer_start(tp);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                if (!err2)
                        tg3_phy_start(tp);
        }

unlock:
        rtnl_unlock();
        return err;
}

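/**
 * tg3_resume - system-sleep resume callback
 * @device: the device being resumed
 *
 * Reattaches the interface and restarts the hardware, resetting the
 * PHY unless the link was deliberately kept up across the power-down.
 */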
static int tg3_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp,
                             !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
        if (err)
                goto out;

        tg3_timer_start(tp);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

unlock:
        rtnl_unlock();
        return err;
}
#endif /* CONFIG_PM_SLEEP */

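/*
 * SIMPLE_DEV_PM_OPS() builds a struct dev_pm_ops that routes the system
 * sleep transitions (suspend/resume and hibernation's freeze/thaw/
 * poweroff/restore) to the two callbacks above; when CONFIG_PM_SLEEP is
 * off, the callbacks are simply never referenced.
 */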
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

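/**
 * tg3_shutdown - called at system shutdown or reboot
 * @pdev: Pointer to PCI device
 *
 * Closes the interface and, if the system is powering off, puts the
 * chip into a low-power state.
 */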
static void tg3_shutdown(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);

        rtnl_lock();
        netif_device_detach(dev);

        if (netif_running(dev))
                dev_close(dev);

        if (system_state == SYSTEM_POWER_OFF)
                tg3_power_down(tp);

        rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        dev_info(&pdev->dev, "PCI I/O error detected\n");

        rtnl_lock();

        /* The netdev may not exist yet if the error arrives during probe */
        if (!netdev || !netif_running(netdev))
                goto done;

        /* Recovery is only attempted for a frozen channel; a permanent
         * failure is handled at the 'done' label below.
         */
        if (state == pci_channel_io_frozen)
                tp->pcierr_recovery = true;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        /* Make sure that the reset task cannot run */
        tg3_reset_task_cancel(tp);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
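        /*
         * A permanent failure cannot be recovered: re-enable NAPI so
         * dev_close() can complete, and tell the PCI core to disconnect.
         * Otherwise disable the device and request a slot reset.
         */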
        if (state == pci_channel_io_perm_failure) {
                if (netdev) {
                        tg3_napi_enable(tp);
                        dev_close(netdev);
                }
                err = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_disable_device(pdev);
        }

        rtnl_unlock();

        return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by the BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netdev || !netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
                tg3_napi_enable(tp);
                dev_close(netdev);
        }
        rtnl_unlock();

        return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netdev || !netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, true);
        if (err) {
                tg3_full_unlock(tp);
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tg3_timer_start(tp);

        tg3_netif_start(tp);

        tg3_full_unlock(tp);

        tg3_phy_start(tp);

done:
        tp->pcierr_recovery = false;
        rtnl_unlock();
}

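/*
 * AER recovery sequence: the PCI core invokes .error_detected first,
 * then (for a recoverable error) .slot_reset after the link has been
 * reset, and finally .resume once traffic may flow again.
 */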
static const struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

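/*
 * Glue everything together for the PCI core: probe/remove, power
 * management, error recovery and shutdown entry points.
 */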
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = tg3_remove_one,
        .err_handler    = &tg3_err_handler,
        .driver.pm      = &tg3_pm_ops,
        .shutdown       = tg3_shutdown,
};

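/*
 * module_pci_driver() expands to the module_init()/module_exit()
 * boilerplate that registers and unregisters tg3_driver.
 */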
module_pci_driver(tg3_driver);