/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2016 Broadcom Corporation.
 *      Copyright (C) 2016-2017 Broadcom Ltd.
 *      Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *      refers to Broadcom Inc. and/or its subsidiaries.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
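
/* For illustration (not part of the original source): flags are named
 * without the TG3_FLAG_ prefix, which the macros token-paste on, e.g.:
 *
 *      if (tg3_flag(tp, ENABLE_APE))
 *              tg3_flag_set(tp, APE_HAS_NCSI);
 *
 * Only enum TG3_FLAGS values defined in tg3.h are valid arguments, and
 * the inline helpers give the enum argument type checking.
 */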

#define DRV_MODULE_NAME         "tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
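
/* Because TG3_TX_RING_SIZE is a power of two, NEXT_TX() wraps the ring
 * index with a mask rather than a modulo, exactly the '& (foo - 1)'
 * trick described above: NEXT_TX(511) == (511 + 1) & 511 == 0.
 */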

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
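/* Mailbox access for chips that must go through PCI config space.  The
 * two hot-path producer-index mailboxes have dedicated config-space
 * registers and are special-cased; everything else goes through the
 * register window at off + 0x5600, which appears to correspond to the
 * GRC mailbox region (compare the GRCMBOX_BASE-relative accessors
 * further below).
 */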
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

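/* Illustrative use of the flushing accessors (hypothetical, not from
 * the original source): honor the GPIO power-switch settling time when
 * rewriting GRC_LOCAL_CTRL, per the usec_wait comment above:
 *
 *      tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 */

/* Access NIC on-chip SRAM through the PCI memory window, either via
 * config space (SRAM_USE_CONFIG) or via the memory-mapped window
 * registers.  The window base is always restored to zero afterwards.
 * The 5906 cannot reach its statistics block this way, so those
 * offsets are skipped (reads return 0).
 */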
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

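/* Acquire an APE semaphore.  Returns 0 on success (or when the APE is
 * not enabled), -EINVAL for an unknown lock number, and -EBUSY if the
 * grant bit is not observed within roughly one millisecond, in which
 * case the lock request is revoked.
 */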
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                fallthrough;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                fallthrough;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

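/* Grab the MEM lock and wait up to timeout_us for a previously posted
 * APE event to be serviced.  On success the MEM lock is left held so
 * the caller can post its own event; on timeout the lock has been
 * dropped and -EBUSY is returned.
 */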
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

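/* Copy len bytes of APE scratchpad memory, starting at base_off, into
 * data.  Transfers are chunked to the shared message-buffer size and
 * use the driver-to-APE scratchpad-read event handshake; returns 0
 * immediately when the APE has no NCSI firmware.
 */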
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 20 milliseconds for APE to service the previous event. */
        err = tg3_ape_event_lock(tp, 20000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
                                   unsigned long interval)
{
        /* Check whether the heartbeat interval has elapsed */
        if (!tg3_flag(tp, ENABLE_APE) ||
            time_before(jiffies, tp->ape_hb_jiffies + interval))
                return;

        tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
        tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

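/* Re-enable chip interrupts.  Writing last_tag << 24 to a vector's
 * interrupt mailbox unmasks it and acknowledges all status updates up
 * to that tag; with 1-shot MSI the mailbox write is repeated.  An
 * initial interrupt is then forced, either via GRC SETINT or by
 * kicking the coalescing engine, depending on tagged-status support.
 */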
static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable()
 *  Similar to tg3_enable_ints(), but it accurately determines whether
 *  there is new work pending and can return without flushing the PIO
 *  write that re-enables interrupts.
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

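/* Clause 22 MII read through the MAC's MI_COM interface.  MI
 * auto-polling is paused for the duration, a read frame is built from
 * the PHY and register addresses, and the BUSY bit is polled for up to
 * PHY_BUSY_LOOPS iterations before giving up with -EBUSY.
 */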
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

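/* Clause 45 MMD access tunneled through clause 22 registers: the
 * device address and register offset are latched via MII_TG3_MMD_CTRL
 * and MII_TG3_MMD_ADDRESS, then the data word is transferred in
 * no-increment mode.
 */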
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
        return tg3_writephy(tp, MII_TG3_MISC_SHDW,
                            reg | val | MII_TG3_MISC_SHDW_WREN);
}

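/* Reset the PHY by setting BMCR_RESET and polling until the bit
 * self-clears, giving up after 5000 reads (roughly 50 ms).
 */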
static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (__tg3_readphy(tp, mii_id, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (__tg3_writephy(tp, mii_id, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
                int addr;

                addr = ssb_gige_get_phyaddr(tp->pdev);
                if (addr < 0)
                        return addr;
                tp->phy_addr = addr;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

        /* The bus registration will look for all the PHYs on the mdio bus.
1551          * Unfortunately, it does not ensure the PHY is powered up before
1552          * accessing the PHY ID registers.  A chip reset is the
1553          * quickest way to bring the device back to an operational state.
1554          */
1555         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1556                 tg3_bmcr_reset(tp);
1557
1558         i = mdiobus_register(tp->mdio_bus);
1559         if (i) {
1560                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1561                 mdiobus_free(tp->mdio_bus);
1562                 return i;
1563         }
1564
1565         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1566
1567         if (!phydev || !phydev->drv) {
1568                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1569                 mdiobus_unregister(tp->mdio_bus);
1570                 mdiobus_free(tp->mdio_bus);
1571                 return -ENODEV;
1572         }
1573
1574         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1575         case PHY_ID_BCM57780:
1576                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1577                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1578                 break;
1579         case PHY_ID_BCM50610:
1580         case PHY_ID_BCM50610M:
1581                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1582                                      PHY_BRCM_RX_REFCLK_UNUSED |
1583                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1584                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1585                 fallthrough;
1586         case PHY_ID_RTL8211C:
1587                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1588                 break;
1589         case PHY_ID_RTL8201E:
1590         case PHY_ID_BCMAC131:
1591                 phydev->interface = PHY_INTERFACE_MODE_MII;
1592                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1593                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1594                 break;
1595         }
1596
1597         tg3_flag_set(tp, MDIOBUS_INITED);
1598
1599         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1600                 tg3_mdio_config_5785(tp);
1601
1602         return 0;
1603 }
1604
1605 static void tg3_mdio_fini(struct tg3 *tp)
1606 {
1607         if (tg3_flag(tp, MDIOBUS_INITED)) {
1608                 tg3_flag_clear(tp, MDIOBUS_INITED);
1609                 mdiobus_unregister(tp->mdio_bus);
1610                 mdiobus_free(tp->mdio_bus);
1611         }
1612 }
1613
1614 /* tp->lock is held. */
1615 static inline void tg3_generate_fw_event(struct tg3 *tp)
1616 {
1617         u32 val;
1618
1619         val = tr32(GRC_RX_CPU_EVENT);
1620         val |= GRC_RX_CPU_DRIVER_EVENT;
1621         tw32_f(GRC_RX_CPU_EVENT, val);
1622
1623         tp->last_event_jiffies = jiffies;
1624 }
1625
1626 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
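
/* tg3_wait_for_event_ack() polls in 8 usec steps, so the iteration
 * count below is sized as (delay_cnt >> 3) + 1 to cover whatever is
 * left of the window.  Worked example for the full 2500 usec budget:
 *
 *     delay_cnt = (2500 >> 3) + 1 = 313 iterations
 *     313 * 8 usec = 2504 usec >= 2500 usec
 *
 * The loop exits early once the firmware clears
 * GRC_RX_CPU_DRIVER_EVENT.
 */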
1627
1628 /* tp->lock is held. */
1629 static void tg3_wait_for_event_ack(struct tg3 *tp)
1630 {
1631         int i;
1632         unsigned int delay_cnt;
1633         long time_remain;
1634
1635         /* If enough time has passed, no wait is necessary. */
1636         time_remain = (long)(tp->last_event_jiffies + 1 +
1637                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1638                       (long)jiffies;
1639         if (time_remain < 0)
1640                 return;
1641
1642         /* Check if we can shorten the wait time. */
1643         delay_cnt = jiffies_to_usecs(time_remain);
1644         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1645                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1646         delay_cnt = (delay_cnt >> 3) + 1;
1647
1648         for (i = 0; i < delay_cnt; i++) {
1649                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1650                         break;
1651                 if (pci_channel_offline(tp->pdev))
1652                         break;
1653
1654                 udelay(8);
1655         }
1656 }
1657
1658 /* tp->lock is held. */
1659 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1660 {
1661         u32 reg, val;
1662
1663         val = 0;
1664         if (!tg3_readphy(tp, MII_BMCR, &reg))
1665                 val = reg << 16;
1666         if (!tg3_readphy(tp, MII_BMSR, &reg))
1667                 val |= (reg & 0xffff);
1668         *data++ = val;
1669
1670         val = 0;
1671         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1672                 val = reg << 16;
1673         if (!tg3_readphy(tp, MII_LPA, &reg))
1674                 val |= (reg & 0xffff);
1675         *data++ = val;
1676
1677         val = 0;
1678         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1679                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1680                         val = reg << 16;
1681                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1682                         val |= (reg & 0xffff);
1683         }
1684         *data++ = val;
1685
1686         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1687                 val = reg << 16;
1688         else
1689                 val = 0;
1690         *data++ = val;
1691 }
1692
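/* Link state is reported to the management firmware as the four
 * 32-bit words gathered above:
 *
 *     data[0] = (BMCR      << 16) | BMSR
 *     data[1] = (ADVERTISE << 16) | LPA
 *     data[2] = (CTRL1000  << 16) | STAT1000   (0 for MII serdes)
 *     data[3] =  PHYADDR   << 16
 *
 * tg3_ump_link_report() below posts them to NIC_SRAM_FW_CMD_DATA_MBOX
 * at offsets 0x0 through 0xc behind a FWCMD_NICDRV_LINK_UPDATE
 * command.
 */
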
1693 /* tp->lock is held. */
1694 static void tg3_ump_link_report(struct tg3 *tp)
1695 {
1696         u32 data[4];
1697
1698         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1699                 return;
1700
1701         tg3_phy_gather_ump_data(tp, data);
1702
1703         tg3_wait_for_event_ack(tp);
1704
1705         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1706         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1707         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1708         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1709         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1710         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1711
1712         tg3_generate_fw_event(tp);
1713 }
1714
1715 /* tp->lock is held. */
1716 static void tg3_stop_fw(struct tg3 *tp)
1717 {
1718         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1719                 /* Wait for RX cpu to ACK the previous event. */
1720                 tg3_wait_for_event_ack(tp);
1721
1722                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1723
1724                 tg3_generate_fw_event(tp);
1725
1726                 /* Wait for RX cpu to ACK this event. */
1727                 tg3_wait_for_event_ack(tp);
1728         }
1729 }
1730
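/* Driver-state signatures: around a chip reset the driver posts
 * DRV_STATE_* values to NIC_SRAM_FW_DRV_STATE_MBOX so that ASF
 * management firmware can follow what the host is doing.  A typical
 * sequence is (sketch; the real call sites live in the reset and
 * power-down paths elsewhere in this file):
 *
 *     tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
 *     ... chip reset and reinit ...
 *     tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
 */
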
1731 /* tp->lock is held. */
1732 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1733 {
1734         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1735                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1736
1737         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1738                 switch (kind) {
1739                 case RESET_KIND_INIT:
1740                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741                                       DRV_STATE_START);
1742                         break;
1743
1744                 case RESET_KIND_SHUTDOWN:
1745                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1746                                       DRV_STATE_UNLOAD);
1747                         break;
1748
1749                 case RESET_KIND_SUSPEND:
1750                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1751                                       DRV_STATE_SUSPEND);
1752                         break;
1753
1754                 default:
1755                         break;
1756                 }
1757         }
1758 }
1759
1760 /* tp->lock is held. */
1761 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1762 {
1763         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1764                 switch (kind) {
1765                 case RESET_KIND_INIT:
1766                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1767                                       DRV_STATE_START_DONE);
1768                         break;
1769
1770                 case RESET_KIND_SHUTDOWN:
1771                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1772                                       DRV_STATE_UNLOAD_DONE);
1773                         break;
1774
1775                 default:
1776                         break;
1777                 }
1778         }
1779 }
1780
1781 /* tp->lock is held. */
1782 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1783 {
1784         if (tg3_flag(tp, ENABLE_ASF)) {
1785                 switch (kind) {
1786                 case RESET_KIND_INIT:
1787                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788                                       DRV_STATE_START);
1789                         break;
1790
1791                 case RESET_KIND_SHUTDOWN:
1792                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1793                                       DRV_STATE_UNLOAD);
1794                         break;
1795
1796                 case RESET_KIND_SUSPEND:
1797                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798                                       DRV_STATE_SUSPEND);
1799                         break;
1800
1801                 default:
1802                         break;
1803                 }
1804         }
1805 }
1806
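/* Poll for the bootcode handshake: the firmware writes the one's
 * complement of NIC_SRAM_FIRMWARE_MBOX_MAGIC1 back to the mailbox
 * once its initialization is done.  The main loop below allows
 * 100000 * 10 usec = 1 second for that; the 5906 VCPU path instead
 * polls VCPU_STATUS for 200 * 100 usec = 20 ms.
 */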
1807 static int tg3_poll_fw(struct tg3 *tp)
1808 {
1809         int i;
1810         u32 val;
1811
1812         if (tg3_flag(tp, NO_FWARE_REPORTED))
1813                 return 0;
1814
1815         if (tg3_flag(tp, IS_SSB_CORE)) {
1816                 /* We don't use firmware. */
1817                 return 0;
1818         }
1819
1820         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1821                 /* Wait up to 20ms for init done. */
1822                 for (i = 0; i < 200; i++) {
1823                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1824                                 return 0;
1825                         if (pci_channel_offline(tp->pdev))
1826                                 return -ENODEV;
1827
1828                         udelay(100);
1829                 }
1830                 return -ENODEV;
1831         }
1832
1833         /* Wait for firmware initialization to complete. */
1834         for (i = 0; i < 100000; i++) {
1835                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1836                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1837                         break;
1838                 if (pci_channel_offline(tp->pdev)) {
1839                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1840                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1841                                 netdev_info(tp->dev, "No firmware running\n");
1842                         }
1843
1844                         break;
1845                 }
1846
1847                 udelay(10);
1848         }
1849
1850         /* Chip might not be fitted with firmware.  Some Sun onboard
1851          * parts are configured like that.  So don't signal the timeout
1852          * of the above loop as an error, but do report the lack of
1853          * running firmware once.
1854          */
1855         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1856                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1857
1858                 netdev_info(tp->dev, "No firmware running\n");
1859         }
1860
1861         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1862                 /* The 57765 A0 needs a little more
1863                  * time to do some important work.
1864                  */
1865                 mdelay(10);
1866         }
1867
1868         return 0;
1869 }
1870
1871 static void tg3_link_report(struct tg3 *tp)
1872 {
1873         if (!netif_carrier_ok(tp->dev)) {
1874                 netif_info(tp, link, tp->dev, "Link is down\n");
1875                 tg3_ump_link_report(tp);
1876         } else if (netif_msg_link(tp)) {
1877                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1878                             (tp->link_config.active_speed == SPEED_1000 ?
1879                              1000 :
1880                              (tp->link_config.active_speed == SPEED_100 ?
1881                               100 : 10)),
1882                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1883                              "full" : "half"));
1884
1885                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1886                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1887                             "on" : "off",
1888                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1889                             "on" : "off");
1890
1891                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1892                         netdev_info(tp->dev, "EEE is %s\n",
1893                                     tp->setlpicnt ? "enabled" : "disabled");
1894
1895                 tg3_ump_link_report(tp);
1896         }
1897
1898         tp->link_up = netif_carrier_ok(tp->dev);
1899 }
1900
1901 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1902 {
1903         u32 flowctrl = 0;
1904
1905         if (adv & ADVERTISE_PAUSE_CAP) {
1906                 flowctrl |= FLOW_CTRL_RX;
1907                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1908                         flowctrl |= FLOW_CTRL_TX;
1909         } else if (adv & ADVERTISE_PAUSE_ASYM)
1910                 flowctrl |= FLOW_CTRL_TX;
1911
1912         return flowctrl;
1913 }
1914
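/* Encode a requested flow-control mode into 1000BASE-X advertisement
 * bits; the inverse of tg3_decode_flowctrl_1000X() below:
 *
 *     TX and RX -> PAUSE
 *     TX only   -> PSE_ASYM
 *     RX only   -> PAUSE | PSE_ASYM
 *     none      -> 0
 */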
1915 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1916 {
1917         u16 miireg;
1918
1919         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1920                 miireg = ADVERTISE_1000XPAUSE;
1921         else if (flow_ctrl & FLOW_CTRL_TX)
1922                 miireg = ADVERTISE_1000XPSE_ASYM;
1923         else if (flow_ctrl & FLOW_CTRL_RX)
1924                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1925         else
1926                 miireg = 0;
1927
1928         return miireg;
1929 }
1930
1931 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1932 {
1933         u32 flowctrl = 0;
1934
1935         if (adv & ADVERTISE_1000XPAUSE) {
1936                 flowctrl |= FLOW_CTRL_RX;
1937                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1938                         flowctrl |= FLOW_CTRL_TX;
1939         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1940                 flowctrl |= FLOW_CTRL_TX;
1941
1942         return flowctrl;
1943 }
1944
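/* Resolve the negotiated 1000BASE-X pause configuration from the
 * local and remote advertisements, consistent with the resolution
 * table in IEEE 802.3 Annex 28B:
 *
 *     both PAUSE                        -> TX and RX pause
 *     both PSE_ASYM, local PAUSE only   -> RX pause
 *     both PSE_ASYM, remote PAUSE only  -> TX pause
 *     anything else                     -> no pause
 */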
1945 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1946 {
1947         u8 cap = 0;
1948
1949         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1950                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1951         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1952                 if (lcladv & ADVERTISE_1000XPAUSE)
1953                         cap = FLOW_CTRL_RX;
1954                 if (rmtadv & ADVERTISE_1000XPAUSE)
1955                         cap = FLOW_CTRL_TX;
1956         }
1957
1958         return cap;
1959 }
1960
1961 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1962 {
1963         u8 autoneg;
1964         u8 flowctrl = 0;
1965         u32 old_rx_mode = tp->rx_mode;
1966         u32 old_tx_mode = tp->tx_mode;
1967
1968         if (tg3_flag(tp, USE_PHYLIB))
1969                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1970         else
1971                 autoneg = tp->link_config.autoneg;
1972
1973         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1974                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1975                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1976                 else
1977                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1978         } else
1979                 flowctrl = tp->link_config.flowctrl;
1980
1981         tp->link_config.active_flowctrl = flowctrl;
1982
1983         if (flowctrl & FLOW_CTRL_RX)
1984                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1985         else
1986                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1987
1988         if (old_rx_mode != tp->rx_mode)
1989                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1990
1991         if (flowctrl & FLOW_CTRL_TX)
1992                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1993         else
1994                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1995
1996         if (old_tx_mode != tp->tx_mode)
1997                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1998 }
1999
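/* phylib link-change callback, registered via phy_connect() in
 * tg3_phy_init() below.  Invoked whenever the PHY state machine sees
 * a change: it rebuilds MAC_MODE for the negotiated speed and duplex,
 * re-resolves flow control, and prints a link report if anything
 * user-visible changed.
 */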
2000 static void tg3_adjust_link(struct net_device *dev)
2001 {
2002         u8 oldflowctrl, linkmesg = 0;
2003         u32 mac_mode, lcl_adv, rmt_adv;
2004         struct tg3 *tp = netdev_priv(dev);
2005         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2006
2007         spin_lock_bh(&tp->lock);
2008
2009         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2010                                     MAC_MODE_HALF_DUPLEX);
2011
2012         oldflowctrl = tp->link_config.active_flowctrl;
2013
2014         if (phydev->link) {
2015                 lcl_adv = 0;
2016                 rmt_adv = 0;
2017
2018                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2019                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2020                 else if (phydev->speed == SPEED_1000 ||
2021                          tg3_asic_rev(tp) != ASIC_REV_5785)
2022                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2023                 else
2024                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2025
2026                 if (phydev->duplex == DUPLEX_HALF)
2027                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2028                 else {
2029                         lcl_adv = mii_advertise_flowctrl(
2030                                   tp->link_config.flowctrl);
2031
2032                         if (phydev->pause)
2033                                 rmt_adv = LPA_PAUSE_CAP;
2034                         if (phydev->asym_pause)
2035                                 rmt_adv |= LPA_PAUSE_ASYM;
2036                 }
2037
2038                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2039         } else
2040                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2041
2042         if (mac_mode != tp->mac_mode) {
2043                 tp->mac_mode = mac_mode;
2044                 tw32_f(MAC_MODE, tp->mac_mode);
2045                 udelay(40);
2046         }
2047
2048         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2049                 if (phydev->speed == SPEED_10)
2050                         tw32(MAC_MI_STAT,
2051                              MAC_MI_STAT_10MBPS_MODE |
2052                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2053                 else
2054                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2055         }
2056
2057         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2058                 tw32(MAC_TX_LENGTHS,
2059                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2060                       (6 << TX_LENGTHS_IPG_SHIFT) |
2061                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2062         else
2063                 tw32(MAC_TX_LENGTHS,
2064                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2065                       (6 << TX_LENGTHS_IPG_SHIFT) |
2066                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2067
2068         if (phydev->link != tp->old_link ||
2069             phydev->speed != tp->link_config.active_speed ||
2070             phydev->duplex != tp->link_config.active_duplex ||
2071             oldflowctrl != tp->link_config.active_flowctrl)
2072                 linkmesg = 1;
2073
2074         tp->old_link = phydev->link;
2075         tp->link_config.active_speed = phydev->speed;
2076         tp->link_config.active_duplex = phydev->duplex;
2077
2078         spin_unlock_bh(&tp->lock);
2079
2080         if (linkmesg)
2081                 tg3_link_report(tp);
2082 }
2083
2084 static int tg3_phy_init(struct tg3 *tp)
2085 {
2086         struct phy_device *phydev;
2087
2088         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2089                 return 0;
2090
2091         /* Bring the PHY back to a known state. */
2092         tg3_bmcr_reset(tp);
2093
2094         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2095
2096         /* Attach the MAC to the PHY. */
2097         phydev = phy_connect(tp->dev, phydev_name(phydev),
2098                              tg3_adjust_link, phydev->interface);
2099         if (IS_ERR(phydev)) {
2100                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2101                 return PTR_ERR(phydev);
2102         }
2103
2104         /* Mask with MAC supported features. */
2105         switch (phydev->interface) {
2106         case PHY_INTERFACE_MODE_GMII:
2107         case PHY_INTERFACE_MODE_RGMII:
2108                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2109                         phy_set_max_speed(phydev, SPEED_1000);
2110                         phy_support_asym_pause(phydev);
2111                         break;
2112                 }
2113                 fallthrough;
2114         case PHY_INTERFACE_MODE_MII:
2115                 phy_set_max_speed(phydev, SPEED_100);
2116                 phy_support_asym_pause(phydev);
2117                 break;
2118         default:
2119                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2120                 return -EINVAL;
2121         }
2122
2123         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2124
2125         phy_attached_info(phydev);
2126
2127         return 0;
2128 }
2129
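/* PHY lifecycle when phylib is in use: tg3_phy_init() connects the
 * device, tg3_phy_start()/tg3_phy_stop() bracket normal operation,
 * and tg3_phy_fini() disconnects.  A sketch of the open path
 * (illustrative ordering; see the actual callers in this file):
 *
 *     err = tg3_phy_init(tp);
 *     if (err)
 *             goto fail;
 *     tg3_phy_start(tp);
 */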
2130 static void tg3_phy_start(struct tg3 *tp)
2131 {
2132         struct phy_device *phydev;
2133
2134         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2135                 return;
2136
2137         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2138
2139         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2140                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2141                 phydev->speed = tp->link_config.speed;
2142                 phydev->duplex = tp->link_config.duplex;
2143                 phydev->autoneg = tp->link_config.autoneg;
2144                 ethtool_convert_legacy_u32_to_link_mode(
2145                         phydev->advertising, tp->link_config.advertising);
2146         }
2147
2148         phy_start(phydev);
2149
2150         phy_start_aneg(phydev);
2151 }
2152
2153 static void tg3_phy_stop(struct tg3 *tp)
2154 {
2155         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2156                 return;
2157
2158         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2159 }
2160
2161 static void tg3_phy_fini(struct tg3 *tp)
2162 {
2163         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2164                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2165                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2166         }
2167 }
2168
2169 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2170 {
2171         int err;
2172         u32 val;
2173
2174         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2175                 return 0;
2176
2177         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2178                 /* Cannot do read-modify-write on 5401 */
2179                 err = tg3_phy_auxctl_write(tp,
2180                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2181                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2182                                            0x4c20);
2183                 goto done;
2184         }
2185
2186         err = tg3_phy_auxctl_read(tp,
2187                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2188         if (err)
2189                 return err;
2190
2191         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2192         err = tg3_phy_auxctl_write(tp,
2193                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2194
2195 done:
2196         return err;
2197 }
2198
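/* FET PHYs hide auxiliary registers behind a shadow bank: set
 * MII_TG3_FET_SHADOW_EN in MII_TG3_FET_TEST, do the shadowed
 * read-modify-write, then restore the original MII_TG3_FET_TEST
 * value.  tg3_phy_fet_toggle_apd() below and the FET branch of
 * tg3_phy_toggle_automdix() both follow this pattern.
 */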
2199 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2200 {
2201         u32 phytest;
2202
2203         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2204                 u32 phy;
2205
2206                 tg3_writephy(tp, MII_TG3_FET_TEST,
2207                              phytest | MII_TG3_FET_SHADOW_EN);
2208                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2209                         if (enable)
2210                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211                         else
2212                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2213                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2214                 }
2215                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2216         }
2217 }
2218
2219 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2220 {
2221         u32 reg;
2222
2223         if (!tg3_flag(tp, 5705_PLUS) ||
2224             (tg3_flag(tp, 5717_PLUS) &&
2225              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2226                 return;
2227
2228         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2229                 tg3_phy_fet_toggle_apd(tp, enable);
2230                 return;
2231         }
2232
2233         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2234               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2235               MII_TG3_MISC_SHDW_SCR5_SDTL |
2236               MII_TG3_MISC_SHDW_SCR5_C125OE;
2237         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2238                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2239
2240         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2241
2243         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2244         if (enable)
2245                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2246
2247         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2248 }
2249
2250 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2251 {
2252         u32 phy;
2253
2254         if (!tg3_flag(tp, 5705_PLUS) ||
2255             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2256                 return;
2257
2258         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2259                 u32 ephy;
2260
2261                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2262                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2263
2264                         tg3_writephy(tp, MII_TG3_FET_TEST,
2265                                      ephy | MII_TG3_FET_SHADOW_EN);
2266                         if (!tg3_readphy(tp, reg, &phy)) {
2267                                 if (enable)
2268                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2269                                 else
2270                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2271                                 tg3_writephy(tp, reg, phy);
2272                         }
2273                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2274                 }
2275         } else {
2276                 int ret;
2277
2278                 ret = tg3_phy_auxctl_read(tp,
2279                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2280                 if (!ret) {
2281                         if (enable)
2282                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2283                         else
2284                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2285                         tg3_phy_auxctl_write(tp,
2286                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2287                 }
2288         }
2289 }
2290
2291 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2292 {
2293         int ret;
2294         u32 val;
2295
2296         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2297                 return;
2298
2299         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2300         if (!ret)
2301                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2302                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2303 }
2304
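/* One-time-programmable (OTP) PHY tuning: tp->phy_otp packs several
 * bit-fields (AGC target, HPF, LPF, VDAC, 10BT amplitude and offset
 * trims) that are extracted with the TG3_OTP_*_MASK/_SHIFT pairs and
 * written to the matching DSP taps while the aux-control SMDSP clock
 * is enabled.
 */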
2305 static void tg3_phy_apply_otp(struct tg3 *tp)
2306 {
2307         u32 otp, phy;
2308
2309         if (!tp->phy_otp)
2310                 return;
2311
2312         otp = tp->phy_otp;
2313
2314         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2315                 return;
2316
2317         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2318         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2319         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2320
2321         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2322               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2323         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2324
2325         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2326         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2327         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2328
2329         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2330         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2331
2332         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2333         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2334
2335         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2336               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2337         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2338
2339         tg3_phy_toggle_auxctl_smdsp(tp, false);
2340 }
2341
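/* Snapshot the chip's current EEE state into a struct ethtool_eee.
 * With a NULL argument the driver-owned tp->eee is refreshed; callers
 * may pass their own buffer instead.
 */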
2342 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2343 {
2344         u32 val;
2345         struct ethtool_eee *dest = &tp->eee;
2346
2347         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2348                 return;
2349
2350         if (eee)
2351                 dest = eee;
2352
2353         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2354                 return;
2355
2356         /* Pull eee_active */
2357         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2358             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2359                 dest->eee_active = 1;
2360         } else
2361                 dest->eee_active = 0;
2362
2363         /* Pull lp advertised settings */
2364         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2365                 return;
2366         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2367
2368         /* Pull advertised and eee_enabled settings */
2369         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2370                 return;
2371         dest->eee_enabled = !!val;
2372         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2373
2374         /* Pull tx_lpi_enabled */
2375         val = tr32(TG3_CPMU_EEE_MODE);
2376         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2377
2378         /* Pull lpi timer value */
2379         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2380 }
2381
2382 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2383 {
2384         u32 val;
2385
2386         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2387                 return;
2388
2389         tp->setlpicnt = 0;
2390
2391         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2392             current_link_up &&
2393             tp->link_config.active_duplex == DUPLEX_FULL &&
2394             (tp->link_config.active_speed == SPEED_100 ||
2395              tp->link_config.active_speed == SPEED_1000)) {
2396                 u32 eeectl;
2397
2398                 if (tp->link_config.active_speed == SPEED_1000)
2399                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2400                 else
2401                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2402
2403                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2404
2405                 tg3_eee_pull_config(tp, NULL);
2406                 if (tp->eee.eee_active)
2407                         tp->setlpicnt = 2;
2408         }
2409
2410         if (!tp->setlpicnt) {
2411                 if (current_link_up &&
2412                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2413                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2414                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2415                 }
2416
2417                 val = tr32(TG3_CPMU_EEE_MODE);
2418                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2419         }
2420 }
2421
2422 static void tg3_phy_eee_enable(struct tg3 *tp)
2423 {
2424         u32 val;
2425
2426         if (tp->link_config.active_speed == SPEED_1000 &&
2427             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2428              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2429              tg3_flag(tp, 57765_CLASS)) &&
2430             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2431                 val = MII_TG3_DSP_TAP26_ALNOKO |
2432                       MII_TG3_DSP_TAP26_RMRXSTO;
2433                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2434                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2435         }
2436
2437         val = tr32(TG3_CPMU_EEE_MODE);
2438         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2439 }
2440
2441 static int tg3_wait_macro_done(struct tg3 *tp)
2442 {
2443         int limit = 100;
2444
2445         while (limit--) {
2446                 u32 tmp32;
2447
2448                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2449                         if ((tmp32 & 0x1000) == 0)
2450                                 break;
2451                 }
2452         }
2453         if (limit < 0)
2454                 return -EBUSY;
2455
2456         return 0;
2457 }
2458
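/* DSP channel test used by the 5703/4/5 reset workaround: for each
 * of the four channels, write the six-word pattern, run the macro,
 * then read the pattern back (low 15 bits and high 4 bits per pair)
 * and compare.  A macro timeout sets *resetp so the caller resets the
 * PHY before retrying; a plain data mismatch just fails the attempt.
 */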
2459 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2460 {
2461         static const u32 test_pat[4][6] = {
2462         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2463         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2464         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2465         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2466         };
2467         int chan;
2468
2469         for (chan = 0; chan < 4; chan++) {
2470                 int i;
2471
2472                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2473                              (chan * 0x2000) | 0x0200);
2474                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2475
2476                 for (i = 0; i < 6; i++)
2477                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2478                                      test_pat[chan][i]);
2479
2480                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2481                 if (tg3_wait_macro_done(tp)) {
2482                         *resetp = 1;
2483                         return -EBUSY;
2484                 }
2485
2486                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2487                              (chan * 0x2000) | 0x0200);
2488                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2489                 if (tg3_wait_macro_done(tp)) {
2490                         *resetp = 1;
2491                         return -EBUSY;
2492                 }
2493
2494                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2495                 if (tg3_wait_macro_done(tp)) {
2496                         *resetp = 1;
2497                         return -EBUSY;
2498                 }
2499
2500                 for (i = 0; i < 6; i += 2) {
2501                         u32 low, high;
2502
2503                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2504                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2505                             tg3_wait_macro_done(tp)) {
2506                                 *resetp = 1;
2507                                 return -EBUSY;
2508                         }
2509                         low &= 0x7fff;
2510                         high &= 0x000f;
2511                         if (low != test_pat[chan][i] ||
2512                             high != test_pat[chan][i+1]) {
2513                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2514                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2515                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2516
2517                                 return -EBUSY;
2518                         }
2519                 }
2520         }
2521
2522         return 0;
2523 }
2524
2525 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2526 {
2527         int chan;
2528
2529         for (chan = 0; chan < 4; chan++) {
2530                 int i;
2531
2532                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2533                              (chan * 0x2000) | 0x0200);
2534                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2535                 for (i = 0; i < 6; i++)
2536                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2537                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2538                 if (tg3_wait_macro_done(tp))
2539                         return -EBUSY;
2540         }
2541
2542         return 0;
2543 }
2544
2545 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2546 {
2547         u32 reg32, phy9_orig;
2548         int retries, do_phy_reset, err;
2549
2550         retries = 10;
2551         do_phy_reset = 1;
2552         do {
2553                 if (do_phy_reset) {
2554                         err = tg3_bmcr_reset(tp);
2555                         if (err)
2556                                 return err;
2557                         do_phy_reset = 0;
2558                 }
2559
2560                 /* Disable transmitter and interrupt.  */
2561                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2562                         continue;
2563
2564                 reg32 |= 0x3000;
2565                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2566
2567                 /* Set full-duplex, 1000 Mbps.  */
2568                 tg3_writephy(tp, MII_BMCR,
2569                              BMCR_FULLDPLX | BMCR_SPEED1000);
2570
2571                 /* Set to master mode.  */
2572                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2573                         continue;
2574
2575                 tg3_writephy(tp, MII_CTRL1000,
2576                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2577
2578                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2579                 if (err)
2580                         return err;
2581
2582                 /* Block the PHY control access.  */
2583                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2584
2585                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2586                 if (!err)
2587                         break;
2588         } while (--retries);
2589
2590         err = tg3_phy_reset_chanpat(tp);
2591         if (err)
2592                 return err;
2593
2594         tg3_phydsp_write(tp, 0x8005, 0x0000);
2595
2596         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2597         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2598
2599         tg3_phy_toggle_auxctl_smdsp(tp, false);
2600
2601         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2602
2603         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2604         if (err)
2605                 return err;
2606
2607         reg32 &= ~0x3000;
2608         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2609
2610         return 0;
2611 }
2612
2613 static void tg3_carrier_off(struct tg3 *tp)
2614 {
2615         netif_carrier_off(tp->dev);
2616         tp->link_up = false;
2617 }
2618
2619 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2620 {
2621         if (tg3_flag(tp, ENABLE_ASF))
2622                 netdev_warn(tp->dev,
2623                             "Management side-band traffic will be interrupted during phy settings change\n");
2624 }
2625
2626 /* Reset the tigon3 PHY and then reapply the chip-specific fixups
2627  * and workaround settings that a reset would otherwise clear.
2628  */
2629 static int tg3_phy_reset(struct tg3 *tp)
2630 {
2631         u32 val, cpmuctrl;
2632         int err;
2633
2634         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2635                 val = tr32(GRC_MISC_CFG);
2636                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2637                 udelay(40);
2638         }
2639         err  = tg3_readphy(tp, MII_BMSR, &val);
2640         err |= tg3_readphy(tp, MII_BMSR, &val);
2641         if (err != 0)
2642                 return -EBUSY;
2643
2644         if (netif_running(tp->dev) && tp->link_up) {
2645                 netif_carrier_off(tp->dev);
2646                 tg3_link_report(tp);
2647         }
2648
2649         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2650             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2651             tg3_asic_rev(tp) == ASIC_REV_5705) {
2652                 err = tg3_phy_reset_5703_4_5(tp);
2653                 if (err)
2654                         return err;
2655                 goto out;
2656         }
2657
2658         cpmuctrl = 0;
2659         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2660             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2661                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2662                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2663                         tw32(TG3_CPMU_CTRL,
2664                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2665         }
2666
2667         err = tg3_bmcr_reset(tp);
2668         if (err)
2669                 return err;
2670
2671         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2672                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2673                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2674
2675                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2676         }
2677
2678         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2679             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2680                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2681                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2682                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2683                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2684                         udelay(40);
2685                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2686                 }
2687         }
2688
2689         if (tg3_flag(tp, 5717_PLUS) &&
2690             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2691                 return 0;
2692
2693         tg3_phy_apply_otp(tp);
2694
2695         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2696                 tg3_phy_toggle_apd(tp, true);
2697         else
2698                 tg3_phy_toggle_apd(tp, false);
2699
2700 out:
2701         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2702             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2703                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2704                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2705                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2706         }
2707
2708         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2709                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2711         }
2712
2713         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2714                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2715                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2716                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2717                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2718                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2719                 }
2720         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2721                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2722                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2723                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2724                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2725                                 tg3_writephy(tp, MII_TG3_TEST1,
2726                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2727                         } else
2728                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2729
2730                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2731                 }
2732         }
2733
2734         /* Set Extended packet length bit (bit 14) on all chips that
2735          * support jumbo frames. */
2736         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2737                 /* Cannot do read-modify-write on 5401 */
2738                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2739         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2740                 /* Set bit 14 with read-modify-write to preserve other bits */
2741                 err = tg3_phy_auxctl_read(tp,
2742                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2743                 if (!err)
2744                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2745                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2746         }
2747
2748         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2749          * jumbo frame transmission.
2750          */
2751         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2752                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2753                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2754                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2755         }
2756
2757         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2758                 /* adjust output voltage */
2759                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2760         }
2761
2762         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2763                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2764
2765         tg3_phy_toggle_automdix(tp, true);
2766         tg3_phy_set_wirespeed(tp);
2767         return 0;
2768 }
2769
2770 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2771 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2772 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2773                                           TG3_GPIO_MSG_NEED_VAUX)
2774 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2775         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2776          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2777          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2778          (TG3_GPIO_MSG_DRVR_PRES << 12))
2779
2780 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2781         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2782          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2783          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2784          (TG3_GPIO_MSG_NEED_VAUX << 12))
2785
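/* Each PCI function owns a 4-bit field in the shared GPIO message
 * word, starting at TG3_APE_GPIO_MSG_SHIFT: bit 0 of the field is
 * DRVR_PRES, bit 1 is NEED_VAUX.  Example: function 2 updates the
 * bits at shift TG3_APE_GPIO_MSG_SHIFT + 8.
 */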
2786 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2787 {
2788         u32 status, shift;
2789
2790         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2791             tg3_asic_rev(tp) == ASIC_REV_5719)
2792                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2793         else
2794                 status = tr32(TG3_CPMU_DRV_STATUS);
2795
2796         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2797         status &= ~(TG3_GPIO_MSG_MASK << shift);
2798         status |= (newstat << shift);
2799
2800         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801             tg3_asic_rev(tp) == ASIC_REV_5719)
2802                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2803         else
2804                 tw32(TG3_CPMU_DRV_STATUS, status);
2805
2806         return status >> TG3_APE_GPIO_MSG_SHIFT;
2807 }
2808
2809 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2810 {
2811         if (!tg3_flag(tp, IS_NIC))
2812                 return 0;
2813
2814         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2815             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2816             tg3_asic_rev(tp) == ASIC_REV_5720) {
2817                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2818                         return -EIO;
2819
2820                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2821
2822                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2823                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2824
2825                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2826         } else {
2827                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2829         }
2830
2831         return 0;
2832 }
2833
2834 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2835 {
2836         u32 grc_local_ctrl;
2837
2838         if (!tg3_flag(tp, IS_NIC) ||
2839             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2840             tg3_asic_rev(tp) == ASIC_REV_5701)
2841                 return;
2842
2843         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2844
2845         tw32_wait_f(GRC_LOCAL_CTRL,
2846                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2847                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2848
2849         tw32_wait_f(GRC_LOCAL_CTRL,
2850                     grc_local_ctrl,
2851                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2852
2853         tw32_wait_f(GRC_LOCAL_CTRL,
2854                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2855                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2856 }
2857
2858 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2859 {
2860         if (!tg3_flag(tp, IS_NIC))
2861                 return;
2862
2863         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2864             tg3_asic_rev(tp) == ASIC_REV_5701) {
2865                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2866                             (GRC_LCLCTRL_GPIO_OE0 |
2867                              GRC_LCLCTRL_GPIO_OE1 |
2868                              GRC_LCLCTRL_GPIO_OE2 |
2869                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2870                              GRC_LCLCTRL_GPIO_OUTPUT1),
2871                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2872         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2873                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2874                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2875                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2876                                      GRC_LCLCTRL_GPIO_OE1 |
2877                                      GRC_LCLCTRL_GPIO_OE2 |
2878                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2879                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2880                                      tp->grc_local_ctrl;
2881                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2882                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2883
2884                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2885                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2886                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2887
2888                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2889                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2890                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2891         } else {
2892                 u32 no_gpio2;
2893                 u32 grc_local_ctrl = 0;
2894
2895                 /* Workaround to prevent drawing excess current. */
2896                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2897                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2898                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2899                                     grc_local_ctrl,
2900                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2901                 }
2902
2903                 /* On 5753 and variants, GPIO2 cannot be used. */
2904                 no_gpio2 = tp->nic_sram_data_cfg &
2905                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2906
2907                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2908                                   GRC_LCLCTRL_GPIO_OE1 |
2909                                   GRC_LCLCTRL_GPIO_OE2 |
2910                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2911                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2912                 if (no_gpio2) {
2913                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2914                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2915                 }
2916                 tw32_wait_f(GRC_LOCAL_CTRL,
2917                             tp->grc_local_ctrl | grc_local_ctrl,
2918                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2919
2920                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2921
2922                 tw32_wait_f(GRC_LOCAL_CTRL,
2923                             tp->grc_local_ctrl | grc_local_ctrl,
2924                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2925
2926                 if (!no_gpio2) {
2927                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2928                         tw32_wait_f(GRC_LOCAL_CTRL,
2929                                     tp->grc_local_ctrl | grc_local_ctrl,
2930                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2931                 }
2932         }
2933 }
2934
2935 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2936 {
2937         u32 msg = 0;
2938
2939         /* Serialize power state transitions */
2940         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2941                 return;
2942
2943         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2944                 msg = TG3_GPIO_MSG_NEED_VAUX;
2945
2946         msg = tg3_set_function_status(tp, msg);
2947
2948         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2949                 goto done;
2950
2951         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2952                 tg3_pwrsrc_switch_to_vaux(tp);
2953         else
2954                 tg3_pwrsrc_die_with_vmain(tp);
2955
2956 done:
2957         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2958 }
2959
2960 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2961 {
2962         bool need_vaux = false;
2963
2964         /* The GPIOs do something completely different on 57765. */
2965         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2966                 return;
2967
2968         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2969             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2970             tg3_asic_rev(tp) == ASIC_REV_5720) {
2971                 tg3_frob_aux_power_5717(tp, include_wol ?
2972                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2973                 return;
2974         }
2975
2976         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2977                 struct net_device *dev_peer;
2978
2979                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2980
2981                 /* remove_one() may have been run on the peer. */
2982                 if (dev_peer) {
2983                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2984
2985                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2986                                 return;
2987
2988                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2989                             tg3_flag(tp_peer, ENABLE_ASF))
2990                                 need_vaux = true;
2991                 }
2992         }
2993
2994         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2995             tg3_flag(tp, ENABLE_ASF))
2996                 need_vaux = true;
2997
2998         if (need_vaux)
2999                 tg3_pwrsrc_switch_to_vaux(tp);
3000         else
3001                 tg3_pwrsrc_die_with_vmain(tp);
3002 }
3003
3004 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3005 {
3006         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3007                 return 1;
3008         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3009                 if (speed != SPEED_10)
3010                         return 1;
3011         } else if (speed == SPEED_10)
3012                 return 1;
3013
3014         return 0;
3015 }
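
/*
 * Summary of the rule above (an illustrative note, not from Broadcom
 * documentation): callers set MAC_MODE_LINK_POLARITY when this
 * returns 1, so
 *
 *      LED_CTRL_MODE_PHY_2  ->  polarity bit set at any speed
 *      BCM5411 PHY          ->  set unless the link is at 10 Mb/s
 *      all other PHYs       ->  set only when the link is at 10 Mb/s
 */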
3016
3017 static bool tg3_phy_power_bug(struct tg3 *tp)
3018 {
3019         switch (tg3_asic_rev(tp)) {
3020         case ASIC_REV_5700:
3021         case ASIC_REV_5704:
3022                 return true;
3023         case ASIC_REV_5780:
3024                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3025                         return true;
3026                 return false;
3027         case ASIC_REV_5717:
3028                 if (!tp->pci_fn)
3029                         return true;
3030                 return false;
3031         case ASIC_REV_5719:
3032         case ASIC_REV_5720:
3033                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3034                     !tp->pci_fn)
3035                         return true;
3036                 return false;
3037         }
3038
3039         return false;
3040 }
3041
3042 static bool tg3_phy_led_bug(struct tg3 *tp)
3043 {
3044         switch (tg3_asic_rev(tp)) {
3045         case ASIC_REV_5719:
3046         case ASIC_REV_5720:
3047                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3048                     !tp->pci_fn)
3049                         return true;
3050                 return false;
3051         }
3052
3053         return false;
3054 }
3055
3056 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3057 {
3058         u32 val;
3059
3060         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3061                 return;
3062
3063         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3064                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3065                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3066                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3067
3068                         sg_dig_ctrl |=
3069                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3070                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3071                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3072                 }
3073                 return;
3074         }
3075
3076         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3077                 tg3_bmcr_reset(tp);
3078                 val = tr32(GRC_MISC_CFG);
3079                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3080                 udelay(40);
3081                 return;
3082         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3083                 u32 phytest;
3084                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3085                         u32 phy;
3086
3087                         tg3_writephy(tp, MII_ADVERTISE, 0);
3088                         tg3_writephy(tp, MII_BMCR,
3089                                      BMCR_ANENABLE | BMCR_ANRESTART);
3090
3091                         tg3_writephy(tp, MII_TG3_FET_TEST,
3092                                      phytest | MII_TG3_FET_SHADOW_EN);
3093                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3094                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3095                                 tg3_writephy(tp,
3096                                              MII_TG3_FET_SHDW_AUXMODE4,
3097                                              phy);
3098                         }
3099                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3100                 }
3101                 return;
3102         } else if (do_low_power) {
3103                 if (!tg3_phy_led_bug(tp))
3104                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3105                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3106
3107                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3108                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3109                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3110                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3111         }
3112
3113         /* On some chips the PHY must not be powered down because of
3114          * hardware bugs; tg3_phy_power_bug() identifies them.
3115          */
3116         if (tg3_phy_power_bug(tp))
3117                 return;
3118
3119         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3120             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3121                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3122                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3123                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3124                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3125         }
3126
3127         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3128 }
3129
3130 /* tp->lock is held. */
3131 static int tg3_nvram_lock(struct tg3 *tp)
3132 {
3133         if (tg3_flag(tp, NVRAM)) {
3134                 int i;
3135
3136                 if (tp->nvram_lock_cnt == 0) {
3137                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3138                         for (i = 0; i < 8000; i++) {
3139                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3140                                         break;
3141                                 udelay(20);
3142                         }
3143                         if (i == 8000) {
3144                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3145                                 return -ENODEV;
3146                         }
3147                 }
3148                 tp->nvram_lock_cnt++;
3149         }
3150         return 0;
3151 }
3152
3153 /* tp->lock is held. */
3154 static void tg3_nvram_unlock(struct tg3 *tp)
3155 {
3156         if (tg3_flag(tp, NVRAM)) {
3157                 if (tp->nvram_lock_cnt > 0)
3158                         tp->nvram_lock_cnt--;
3159                 if (tp->nvram_lock_cnt == 0)
3160                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3161         }
3162 }
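
/*
 * Usage sketch (illustrative): the SWARB lock is reference counted
 * via tp->nvram_lock_cnt, so nested acquisitions under tp->lock are
 * cheap; only the first lock touches the hardware arbiter and only
 * the final unlock releases it:
 *
 *      ret = tg3_nvram_lock(tp);       first call waits for SWARB_GNT1
 *      if (ret)
 *              return ret;
 *      ...access NVRAM registers...
 *      tg3_nvram_unlock(tp);           drops SWARB_REQ only at count 0
 */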
3163
3164 /* tp->lock is held. */
3165 static void tg3_enable_nvram_access(struct tg3 *tp)
3166 {
3167         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3168                 u32 nvaccess = tr32(NVRAM_ACCESS);
3169
3170                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3171         }
3172 }
3173
3174 /* tp->lock is held. */
3175 static void tg3_disable_nvram_access(struct tg3 *tp)
3176 {
3177         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3178                 u32 nvaccess = tr32(NVRAM_ACCESS);
3179
3180                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3181         }
3182 }
3183
3184 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3185                                         u32 offset, u32 *val)
3186 {
3187         u32 tmp;
3188         int i;
3189
3190         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3191                 return -EINVAL;
3192
3193         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3194                                         EEPROM_ADDR_DEVID_MASK |
3195                                         EEPROM_ADDR_READ);
3196         tw32(GRC_EEPROM_ADDR,
3197              tmp |
3198              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3199              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3200               EEPROM_ADDR_ADDR_MASK) |
3201              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3202
3203         for (i = 0; i < 1000; i++) {
3204                 tmp = tr32(GRC_EEPROM_ADDR);
3205
3206                 if (tmp & EEPROM_ADDR_COMPLETE)
3207                         break;
3208                 msleep(1);
3209         }
3210         if (!(tmp & EEPROM_ADDR_COMPLETE))
3211                 return -EBUSY;
3212
3213         tmp = tr32(GRC_EEPROM_DATA);
3214
3215         /*
3216          * The data will always be opposite the native endian
3217          * format.  Perform a blind byteswap to compensate.
3218          */
3219         *val = swab32(tmp);
3220
3221         return 0;
3222 }
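
/*
 * Illustrative note (not part of the original driver): the blind
 * byteswap above is deliberately endian-agnostic.  If GRC_EEPROM_DATA
 * returns 0x12345678, swab32() yields 0x78563412 on both big- and
 * little-endian hosts; because the SEEPROM word is always opposite
 * the native format, the swapped value is the word in native order.
 */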
3223
3224 #define NVRAM_CMD_TIMEOUT 10000
3225
3226 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3227 {
3228         int i;
3229
3230         tw32(NVRAM_CMD, nvram_cmd);
3231         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3232                 usleep_range(10, 40);
3233                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3234                         udelay(10);
3235                         break;
3236                 }
3237         }
3238
3239         if (i == NVRAM_CMD_TIMEOUT)
3240                 return -EBUSY;
3241
3242         return 0;
3243 }
3244
3245 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3246 {
3247         if (tg3_flag(tp, NVRAM) &&
3248             tg3_flag(tp, NVRAM_BUFFERED) &&
3249             tg3_flag(tp, FLASH) &&
3250             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3251             (tp->nvram_jedecnum == JEDEC_ATMEL))
3252
3253                 addr = ((addr / tp->nvram_pagesize) <<
3254                         ATMEL_AT45DB0X1B_PAGE_POS) +
3255                        (addr % tp->nvram_pagesize);
3256
3257         return addr;
3258 }
3259
3260 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3261 {
3262         if (tg3_flag(tp, NVRAM) &&
3263             tg3_flag(tp, NVRAM_BUFFERED) &&
3264             tg3_flag(tp, FLASH) &&
3265             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3266             (tp->nvram_jedecnum == JEDEC_ATMEL))
3267
3268                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3269                         tp->nvram_pagesize) +
3270                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3271
3272         return addr;
3273 }
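
/*
 * Worked example (illustrative; assumes the common AT45DB011B
 * geometry of 264-byte pages with ATMEL_AT45DB0X1B_PAGE_POS == 9).
 * tg3_nvram_phys_addr() maps a linear offset onto page slots:
 *
 *      linear 600  ->  page 600 / 264 = 2, offset 600 % 264 = 72
 *                  ->  phys (2 << 9) + 72 = 0x448
 *
 * and tg3_nvram_logical_addr() is the exact inverse:
 *
 *      phys 0x448  ->  page 0x448 >> 9 = 2, offset 0x448 & 0x1ff = 72
 *                  ->  linear 2 * 264 + 72 = 600
 */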
3274
3275 /* NOTE: Data read in from NVRAM is byteswapped according to
3276  * the byteswapping settings for all other register accesses.
3277  * tg3 devices are BE devices, so on a BE machine, the data
3278  * returned will be exactly as it is seen in NVRAM.  On a LE
3279  * machine, the 32-bit value will be byteswapped.
3280  */
3281 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3282 {
3283         int ret;
3284
3285         if (!tg3_flag(tp, NVRAM))
3286                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3287
3288         offset = tg3_nvram_phys_addr(tp, offset);
3289
3290         if (offset > NVRAM_ADDR_MSK)
3291                 return -EINVAL;
3292
3293         ret = tg3_nvram_lock(tp);
3294         if (ret)
3295                 return ret;
3296
3297         tg3_enable_nvram_access(tp);
3298
3299         tw32(NVRAM_ADDR, offset);
3300         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3301                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3302
3303         if (ret == 0)
3304                 *val = tr32(NVRAM_RDDATA);
3305
3306         tg3_disable_nvram_access(tp);
3307
3308         tg3_nvram_unlock(tp);
3309
3310         return ret;
3311 }
3312
3313 /* Ensures NVRAM data is in bytestream format. */
3314 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3315 {
3316         u32 v;
3317         int res = tg3_nvram_read(tp, offset, &v);
3318         if (!res)
3319                 *val = cpu_to_be32(v);
3320         return res;
3321 }
3322
3323 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3324                                     u32 offset, u32 len, u8 *buf)
3325 {
3326         int i, j, rc = 0;
3327         u32 val;
3328
3329         for (i = 0; i < len; i += 4) {
3330                 u32 addr;
3331                 __be32 data;
3332
3333                 addr = offset + i;
3334
3335                 memcpy(&data, buf + i, 4);
3336
3337                 /*
3338                  * The SEEPROM interface expects the data to always be opposite
3339                  * the native endian format.  We accomplish this by reversing
3340                  * all the operations that would have been performed on the
3341                  * data from a call to tg3_nvram_read_be32().
3342                  */
3343                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3344
3345                 val = tr32(GRC_EEPROM_ADDR);
3346                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3347
3348                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3349                         EEPROM_ADDR_READ);
3350                 tw32(GRC_EEPROM_ADDR, val |
3351                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3352                         (addr & EEPROM_ADDR_ADDR_MASK) |
3353                         EEPROM_ADDR_START |
3354                         EEPROM_ADDR_WRITE);
3355
3356                 for (j = 0; j < 1000; j++) {
3357                         val = tr32(GRC_EEPROM_ADDR);
3358
3359                         if (val & EEPROM_ADDR_COMPLETE)
3360                                 break;
3361                         msleep(1);
3362                 }
3363                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3364                         rc = -EBUSY;
3365                         break;
3366                 }
3367         }
3368
3369         return rc;
3370 }
3371
3372 /* offset and length are dword aligned */
3373 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3374                 u8 *buf)
3375 {
3376         int ret = 0;
3377         u32 pagesize = tp->nvram_pagesize;
3378         u32 pagemask = pagesize - 1;
3379         u32 nvram_cmd;
3380         u8 *tmp;
3381
3382         tmp = kmalloc(pagesize, GFP_KERNEL);
3383         if (tmp == NULL)
3384                 return -ENOMEM;
3385
3386         while (len) {
3387                 int j;
3388                 u32 phy_addr, page_off, size;
3389
3390                 phy_addr = offset & ~pagemask;
3391
3392                 for (j = 0; j < pagesize; j += 4) {
3393                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3394                                                   (__be32 *) (tmp + j));
3395                         if (ret)
3396                                 break;
3397                 }
3398                 if (ret)
3399                         break;
3400
3401                 page_off = offset & pagemask;
3402                 size = pagesize;
3403                 if (len < size)
3404                         size = len;
3405
3406                 len -= size;
3407
3408                 memcpy(tmp + page_off, buf, size);
3409
3410                 offset = offset + (pagesize - page_off);
3411
3412                 tg3_enable_nvram_access(tp);
3413
3414                 /*
3415                  * Before we can erase the flash page, we need
3416                  * to issue a special "write enable" command.
3417                  */
3418                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3419
3420                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3421                         break;
3422
3423                 /* Erase the target page */
3424                 tw32(NVRAM_ADDR, phy_addr);
3425
3426                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3427                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3428
3429                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3430                         break;
3431
3432                 /* Issue another write enable to start the write. */
3433                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3434
3435                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3436                         break;
3437
3438                 for (j = 0; j < pagesize; j += 4) {
3439                         __be32 data;
3440
3441                         data = *((__be32 *) (tmp + j));
3442
3443                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3444
3445                         tw32(NVRAM_ADDR, phy_addr + j);
3446
3447                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3448                                 NVRAM_CMD_WR;
3449
3450                         if (j == 0)
3451                                 nvram_cmd |= NVRAM_CMD_FIRST;
3452                         else if (j == (pagesize - 4))
3453                                 nvram_cmd |= NVRAM_CMD_LAST;
3454
3455                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3456                         if (ret)
3457                                 break;
3458                 }
3459                 if (ret)
3460                         break;
3461         }
3462
3463         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3464         tg3_nvram_exec_cmd(tp, nvram_cmd);
3465
3466         kfree(tmp);
3467
3468         return ret;
3469 }
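
/*
 * Worked example (illustrative, with a hypothetical 256-byte page):
 * writing 512 bytes at offset 0x200 takes two passes through the loop
 * above.  Each pass reads the full target page into tmp, merges the
 * caller's data, issues a write enable, erases the page, issues
 * another write enable, then rewrites the page word by word with
 * NVRAM_CMD_FIRST on the first word and NVRAM_CMD_LAST on the last:
 *
 *      pass 1: phy_addr = 0x200, page_off = 0, size = 256
 *      pass 2: phy_addr = 0x300, page_off = 0, size = 256
 */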
3470
3471 /* offset and length are dword aligned */
3472 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3473                 u8 *buf)
3474 {
3475         int i, ret = 0;
3476
3477         for (i = 0; i < len; i += 4, offset += 4) {
3478                 u32 page_off, phy_addr, nvram_cmd;
3479                 __be32 data;
3480
3481                 memcpy(&data, buf + i, 4);
3482                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3483
3484                 page_off = offset % tp->nvram_pagesize;
3485
3486                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3487
3488                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3489
3490                 if (page_off == 0 || i == 0)
3491                         nvram_cmd |= NVRAM_CMD_FIRST;
3492                 if (page_off == (tp->nvram_pagesize - 4))
3493                         nvram_cmd |= NVRAM_CMD_LAST;
3494
3495                 if (i == (len - 4))
3496                         nvram_cmd |= NVRAM_CMD_LAST;
3497
3498                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3499                     !tg3_flag(tp, FLASH) ||
3500                     !tg3_flag(tp, 57765_PLUS))
3501                         tw32(NVRAM_ADDR, phy_addr);
3502
3503                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3504                     !tg3_flag(tp, 5755_PLUS) &&
3505                     (tp->nvram_jedecnum == JEDEC_ST) &&
3506                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3507                         u32 cmd;
3508
3509                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3510                         ret = tg3_nvram_exec_cmd(tp, cmd);
3511                         if (ret)
3512                                 break;
3513                 }
3514                 if (!tg3_flag(tp, FLASH)) {
3515                         /* We always do complete word writes to the EEPROM. */
3516                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3517                 }
3518
3519                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3520                 if (ret)
3521                         break;
3522         }
3523         return ret;
3524 }
3525
3526 /* offset and length are dword aligned */
3527 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3528 {
3529         int ret;
3530
3531         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3532                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3533                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3534                 udelay(40);
3535         }
3536
3537         if (!tg3_flag(tp, NVRAM)) {
3538                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3539         } else {
3540                 u32 grc_mode;
3541
3542                 ret = tg3_nvram_lock(tp);
3543                 if (ret)
3544                         return ret;
3545
3546                 tg3_enable_nvram_access(tp);
3547                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3548                         tw32(NVRAM_WRITE1, 0x406);
3549
3550                 grc_mode = tr32(GRC_MODE);
3551                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3552
3553                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3554                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3555                                 buf);
3556                 } else {
3557                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3558                                 buf);
3559                 }
3560
3561                 grc_mode = tr32(GRC_MODE);
3562                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3563
3564                 tg3_disable_nvram_access(tp);
3565                 tg3_nvram_unlock(tp);
3566         }
3567
3568         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3569                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3570                 udelay(40);
3571         }
3572
3573         return ret;
3574 }
3575
3576 #define RX_CPU_SCRATCH_BASE     0x30000
3577 #define RX_CPU_SCRATCH_SIZE     0x04000
3578 #define TX_CPU_SCRATCH_BASE     0x34000
3579 #define TX_CPU_SCRATCH_SIZE     0x04000
3580
3581 /* tp->lock is held. */
3582 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3583 {
3584         int i;
3585         const int iters = 10000;
3586
3587         for (i = 0; i < iters; i++) {
3588                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3589                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3590                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3591                         break;
3592                 if (pci_channel_offline(tp->pdev))
3593                         return -EBUSY;
3594         }
3595
3596         return (i == iters) ? -EBUSY : 0;
3597 }
3598
3599 /* tp->lock is held. */
3600 static int tg3_rxcpu_pause(struct tg3 *tp)
3601 {
3602         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3603
3604         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3605         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3606         udelay(10);
3607
3608         return rc;
3609 }
3610
3611 /* tp->lock is held. */
3612 static int tg3_txcpu_pause(struct tg3 *tp)
3613 {
3614         return tg3_pause_cpu(tp, TX_CPU_BASE);
3615 }
3616
3617 /* tp->lock is held. */
3618 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3619 {
3620         tw32(cpu_base + CPU_STATE, 0xffffffff);
3621         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3622 }
3623
3624 /* tp->lock is held. */
3625 static void tg3_rxcpu_resume(struct tg3 *tp)
3626 {
3627         tg3_resume_cpu(tp, RX_CPU_BASE);
3628 }
3629
3630 /* tp->lock is held. */
3631 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3632 {
3633         int rc;
3634
3635         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3636
3637         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3638                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3639
3640                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3641                 return 0;
3642         }
3643         if (cpu_base == RX_CPU_BASE) {
3644                 rc = tg3_rxcpu_pause(tp);
3645         } else {
3646                 /*
3647                  * There is only an Rx CPU for the 5750 derivative in the
3648                  * BCM4785.
3649                  */
3650                 if (tg3_flag(tp, IS_SSB_CORE))
3651                         return 0;
3652
3653                 rc = tg3_txcpu_pause(tp);
3654         }
3655
3656         if (rc) {
3657                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3658                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3659                 return -ENODEV;
3660         }
3661
3662         /* Clear firmware's nvram arbitration. */
3663         if (tg3_flag(tp, NVRAM))
3664                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3665         return 0;
3666 }
3667
3668 static int tg3_fw_data_len(struct tg3 *tp,
3669                            const struct tg3_firmware_hdr *fw_hdr)
3670 {
3671         int fw_len;
3672
3673         /* Non-fragmented firmware has one firmware header followed by a
3674          * contiguous chunk of data to be written. The length field in that
3675          * header is not the length of the data to be written but the
3676          * complete length of the bss. The data length is derived from
3677          * tp->fw->size minus the headers.
3678          *
3679          * Fragmented firmware has a main header followed by multiple
3680          * fragments. Each fragment is identical to non-fragmented firmware:
3681          * a firmware header followed by a contiguous chunk of data. In
3682          * the main header, the length field is unused and set to 0xffffffff.
3683          * In each fragment header, the length is the entire size of that
3684          * fragment, i.e. fragment data + header length. The data length is
3685          * therefore the length field in the header minus TG3_FW_HDR_LEN.
3686          */
3687         if (tp->fw_len == 0xffffffff)
3688                 fw_len = be32_to_cpu(fw_hdr->len);
3689         else
3690                 fw_len = tp->fw->size;
3691
3692         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3693 }
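
/*
 * Worked example (illustrative; assumes the 12-byte tg3_firmware_hdr
 * of version, base_addr and len).  For fragmented firmware
 * (tp->fw_len == 0xffffffff), a fragment header reporting len = 0x10c
 * covers 0x10c - 12 = 256 bytes of payload, so this returns
 * (0x10c - TG3_FW_HDR_LEN) / sizeof(u32) = 64 words to write.  For
 * non-fragmented firmware, the whole tp->fw->size minus the single
 * header is written instead.
 */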
3694
3695 /* tp->lock is held. */
3696 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3697                                  u32 cpu_scratch_base, int cpu_scratch_size,
3698                                  const struct tg3_firmware_hdr *fw_hdr)
3699 {
3700         int err, i;
3701         void (*write_op)(struct tg3 *, u32, u32);
3702         int total_len = tp->fw->size;
3703
3704         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3705                 netdev_err(tp->dev,
3706                            "%s: Trying to load TX cpu firmware on a 5705-plus chip\n",
3707                            __func__);
3708                 return -EINVAL;
3709         }
3710
3711         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3712                 write_op = tg3_write_mem;
3713         else
3714                 write_op = tg3_write_indirect_reg32;
3715
3716         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3717                 /* It is possible that bootcode is still loading at this point.
3718                  * Get the nvram lock first before halting the cpu.
3719                  */
3720                 int lock_err = tg3_nvram_lock(tp);
3721                 err = tg3_halt_cpu(tp, cpu_base);
3722                 if (!lock_err)
3723                         tg3_nvram_unlock(tp);
3724                 if (err)
3725                         goto out;
3726
3727                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3728                         write_op(tp, cpu_scratch_base + i, 0);
3729                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3730                 tw32(cpu_base + CPU_MODE,
3731                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3732         } else {
3733                 /* Subtract the additional main header of fragmented
3734                  * firmware and advance to the first fragment.
3735                  */
3736                 total_len -= TG3_FW_HDR_LEN;
3737                 fw_hdr++;
3738         }
3739
3740         do {
3741                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3742                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3743                         write_op(tp, cpu_scratch_base +
3744                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3745                                      (i * sizeof(u32)),
3746                                  be32_to_cpu(fw_data[i]));
3747
3748                 total_len -= be32_to_cpu(fw_hdr->len);
3749
3750                 /* Advance to next fragment */
3751                 fw_hdr = (struct tg3_firmware_hdr *)
3752                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3753         } while (total_len > 0);
3754
3755         err = 0;
3756
3757 out:
3758         return err;
3759 }
3760
3761 /* tp->lock is held. */
3762 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3763 {
3764         int i;
3765         const int iters = 5;
3766
3767         tw32(cpu_base + CPU_STATE, 0xffffffff);
3768         tw32_f(cpu_base + CPU_PC, pc);
3769
3770         for (i = 0; i < iters; i++) {
3771                 if (tr32(cpu_base + CPU_PC) == pc)
3772                         break;
3773                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3774                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3775                 tw32_f(cpu_base + CPU_PC, pc);
3776                 udelay(1000);
3777         }
3778
3779         return (i == iters) ? -EBUSY : 0;
3780 }
3781
3782 /* tp->lock is held. */
3783 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3784 {
3785         const struct tg3_firmware_hdr *fw_hdr;
3786         int err;
3787
3788         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3789
3790         /* The firmware blob starts with version numbers, followed by
3791            the start address and length. The length field covers the
3792            complete image: length = end_address_of_bss - start_address_of_text.
3793            The remainder is the blob to be loaded contiguously
3794            from the start address. */
3795
3796         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3797                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3798                                     fw_hdr);
3799         if (err)
3800                 return err;
3801
3802         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3803                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3804                                     fw_hdr);
3805         if (err)
3806                 return err;
3807
3808         /* Now startup only the RX cpu. */
3809         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3810                                        be32_to_cpu(fw_hdr->base_addr));
3811         if (err) {
3812                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3813                            "should be %08x\n", __func__,
3814                            tr32(RX_CPU_BASE + CPU_PC),
3815                                 be32_to_cpu(fw_hdr->base_addr));
3816                 return -ENODEV;
3817         }
3818
3819         tg3_rxcpu_resume(tp);
3820
3821         return 0;
3822 }
3823
3824 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3825 {
3826         const int iters = 1000;
3827         int i;
3828         u32 val;
3829
3830         /* Wait for boot code to complete initialization and enter service
3831          * loop. It is then safe to download service patches.
3832          */
3833         for (i = 0; i < iters; i++) {
3834                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3835                         break;
3836
3837                 udelay(10);
3838         }
3839
3840         if (i == iters) {
3841                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3842                 return -EBUSY;
3843         }
3844
3845         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3846         if (val & 0xff) {
3847                 netdev_warn(tp->dev,
3848                             "Other patches exist. Not downloading EEE patch\n");
3849                 return -EEXIST;
3850         }
3851
3852         return 0;
3853 }
3854
3855 /* tp->lock is held. */
3856 static void tg3_load_57766_firmware(struct tg3 *tp)
3857 {
3858         struct tg3_firmware_hdr *fw_hdr;
3859
3860         if (!tg3_flag(tp, NO_NVRAM))
3861                 return;
3862
3863         if (tg3_validate_rxcpu_state(tp))
3864                 return;
3865
3866         if (!tp->fw)
3867                 return;
3868
3869         /* This firmware blob has a different format than older firmware
3870          * releases, as described below. The main difference is that the
3871          * data is fragmented and written to non-contiguous locations.
3872          *
3873          * The blob begins with a firmware header identical to other
3874          * firmware, consisting of version, base addr and length. The length
3875          * here is unused and set to 0xffffffff.
3876          *
3877          * This is followed by a series of firmware fragments, each
3878          * individually identical to previous firmware, i.e. a firmware
3879          * header followed by the data for that fragment. The version
3880          * field of the individual fragment header is unused.
3881          */
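
        /*
         * Illustrative layout of such a blob (a sketch, not taken from
         * any Broadcom document):
         *
         *      +----------------------------+
         *      | main hdr, len = 0xffffffff |
         *      +----------------------------+
         *      | frag hdr, len = L1         |  L1 includes this header
         *      | (L1 - TG3_FW_HDR_LEN       |
         *      |  bytes of fragment data)   |
         *      +----------------------------+
         *      | frag hdr, len = L2         |
         *      | ...                        |
         *      +----------------------------+
         */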
3882
3883         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3884         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3885                 return;
3886
3887         if (tg3_rxcpu_pause(tp))
3888                 return;
3889
3890         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3891         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3892
3893         tg3_rxcpu_resume(tp);
3894 }
3895
3896 /* tp->lock is held. */
3897 static int tg3_load_tso_firmware(struct tg3 *tp)
3898 {
3899         const struct tg3_firmware_hdr *fw_hdr;
3900         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3901         int err;
3902
3903         if (!tg3_flag(tp, FW_TSO))
3904                 return 0;
3905
3906         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3907
3908         /* The firmware blob starts with version numbers, followed by
3909            the start address and length. The length field covers the
3910            complete image: length = end_address_of_bss - start_address_of_text.
3911            The remainder is the blob to be loaded contiguously
3912            from the start address. */
3913
3914         cpu_scratch_size = tp->fw_len;
3915
3916         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3917                 cpu_base = RX_CPU_BASE;
3918                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3919         } else {
3920                 cpu_base = TX_CPU_BASE;
3921                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3922                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3923         }
3924
3925         err = tg3_load_firmware_cpu(tp, cpu_base,
3926                                     cpu_scratch_base, cpu_scratch_size,
3927                                     fw_hdr);
3928         if (err)
3929                 return err;
3930
3931         /* Now startup the cpu. */
3932         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3933                                        be32_to_cpu(fw_hdr->base_addr));
3934         if (err) {
3935                 netdev_err(tp->dev,
3936                            "%s fails to set CPU PC, is %08x should be %08x\n",
3937                            __func__, tr32(cpu_base + CPU_PC),
3938                            be32_to_cpu(fw_hdr->base_addr));
3939                 return -ENODEV;
3940         }
3941
3942         tg3_resume_cpu(tp, cpu_base);
3943         return 0;
3944 }
3945
3946 /* tp->lock is held. */
3947 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3948                                    int index)
3949 {
3950         u32 addr_high, addr_low;
3951
3952         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3953         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3954                     (mac_addr[4] <<  8) | mac_addr[5]);
3955
3956         if (index < 4) {
3957                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3958                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3959         } else {
3960                 index -= 4;
3961                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3962                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3963         }
3964 }
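
/*
 * Illustrative sketch (not part of the original driver): the packing
 * performed above, shown standalone.  For the made-up address
 * 00:10:18:aa:bb:cc this yields addr_high = 0x00000010 and
 * addr_low = 0x18aabbcc, i.e. the high register carries bytes 0-1
 * and the low register carries bytes 2-5.
 */
static inline void tg3_example_pack_mac(const u8 *mac_addr,
					u32 *addr_high, u32 *addr_low)
{
	*addr_high = (mac_addr[0] << 8) | mac_addr[1];
	*addr_low = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		    (mac_addr[4] << 8) | mac_addr[5];
}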
3965
3966 /* tp->lock is held. */
3967 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3968 {
3969         u32 addr_high;
3970         int i;
3971
3972         for (i = 0; i < 4; i++) {
3973                 if (i == 1 && skip_mac_1)
3974                         continue;
3975                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3976         }
3977
3978         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3979             tg3_asic_rev(tp) == ASIC_REV_5704) {
3980                 for (i = 4; i < 16; i++)
3981                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3982         }
3983
3984         addr_high = (tp->dev->dev_addr[0] +
3985                      tp->dev->dev_addr[1] +
3986                      tp->dev->dev_addr[2] +
3987                      tp->dev->dev_addr[3] +
3988                      tp->dev->dev_addr[4] +
3989                      tp->dev->dev_addr[5]) &
3990                 TX_BACKOFF_SEED_MASK;
3991         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3992 }
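
/*
 * Illustrative note (not part of the original driver): the TX backoff
 * seed is simply the byte sum of the MAC address, masked.  For the
 * made-up address 00:10:18:aa:bb:cc the sum is 0x259, so (assuming
 * TX_BACKOFF_SEED_MASK is 0x3ff) the value written is 0x259.
 */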
3993
3994 static void tg3_enable_register_access(struct tg3 *tp)
3995 {
3996         /*
3997          * Make sure register accesses (indirect or otherwise) will function
3998          * correctly.
3999          */
4000         pci_write_config_dword(tp->pdev,
4001                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4002 }
4003
4004 static int tg3_power_up(struct tg3 *tp)
4005 {
4006         int err;
4007
4008         tg3_enable_register_access(tp);
4009
4010         err = pci_set_power_state(tp->pdev, PCI_D0);
4011         if (!err) {
4012                 /* Switch out of Vaux if it is a NIC */
4013                 tg3_pwrsrc_switch_to_vmain(tp);
4014         } else {
4015                 netdev_err(tp->dev, "Transition to D0 failed\n");
4016         }
4017
4018         return err;
4019 }
4020
4021 static int tg3_setup_phy(struct tg3 *, bool);
4022
4023 static int tg3_power_down_prepare(struct tg3 *tp)
4024 {
4025         u32 misc_host_ctrl;
4026         bool device_should_wake, do_low_power;
4027
4028         tg3_enable_register_access(tp);
4029
4030         /* Restore the CLKREQ setting. */
4031         if (tg3_flag(tp, CLKREQ_BUG))
4032                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4033                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4034
4035         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4036         tw32(TG3PCI_MISC_HOST_CTRL,
4037              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4038
4039         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4040                              tg3_flag(tp, WOL_ENABLE);
4041
4042         if (tg3_flag(tp, USE_PHYLIB)) {
4043                 do_low_power = false;
4044                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4045                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4046                         __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4047                         struct phy_device *phydev;
4048                         u32 phyid;
4049
4050                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4051
4052                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4053
4054                         tp->link_config.speed = phydev->speed;
4055                         tp->link_config.duplex = phydev->duplex;
4056                         tp->link_config.autoneg = phydev->autoneg;
4057                         ethtool_convert_link_mode_to_legacy_u32(
4058                                 &tp->link_config.advertising,
4059                                 phydev->advertising);
4060
4061                         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4062                         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4063                                          advertising);
4064                         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4065                                          advertising);
4066                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4067                                          advertising);
4068
4069                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4070                                 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4071                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4072                                                          advertising);
4073                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4074                                                          advertising);
4075                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4076                                                          advertising);
4077                                 } else {
4078                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4079                                                          advertising);
4080                                 }
4081                         }
4082
4083                         linkmode_copy(phydev->advertising, advertising);
4084                         phy_start_aneg(phydev);
4085
4086                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4087                         if (phyid != PHY_ID_BCMAC131) {
4088                                 phyid &= PHY_BCM_OUI_MASK;
4089                                 if (phyid == PHY_BCM_OUI_1 ||
4090                                     phyid == PHY_BCM_OUI_2 ||
4091                                     phyid == PHY_BCM_OUI_3)
4092                                         do_low_power = true;
4093                         }
4094                 }
4095         } else {
4096                 do_low_power = true;
4097
4098                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4099                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4100
4101                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4102                         tg3_setup_phy(tp, false);
4103         }
4104
4105         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4106                 u32 val;
4107
4108                 val = tr32(GRC_VCPU_EXT_CTRL);
4109                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4110         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4111                 int i;
4112                 u32 val;
4113
4114                 for (i = 0; i < 200; i++) {
4115                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4116                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4117                                 break;
4118                         msleep(1);
4119                 }
4120         }
4121         if (tg3_flag(tp, WOL_CAP))
4122                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4123                                                      WOL_DRV_STATE_SHUTDOWN |
4124                                                      WOL_DRV_WOL |
4125                                                      WOL_SET_MAGIC_PKT);
4126
4127         if (device_should_wake) {
4128                 u32 mac_mode;
4129
4130                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4131                         if (do_low_power &&
4132                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4133                                 tg3_phy_auxctl_write(tp,
4134                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4135                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4136                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4137                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4138                                 udelay(40);
4139                         }
4140
4141                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4142                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4143                         else if (tp->phy_flags &
4144                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4145                                 if (tp->link_config.active_speed == SPEED_1000)
4146                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4147                                 else
4148                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4149                         } else
4150                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4151
4152                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4153                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4154                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4155                                              SPEED_100 : SPEED_10;
4156                                 if (tg3_5700_link_polarity(tp, speed))
4157                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4158                                 else
4159                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4160                         }
4161                 } else {
4162                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4163                 }
4164
4165                 if (!tg3_flag(tp, 5750_PLUS))
4166                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4167
4168                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4169                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4170                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4171                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4172
4173                 if (tg3_flag(tp, ENABLE_APE))
4174                         mac_mode |= MAC_MODE_APE_TX_EN |
4175                                     MAC_MODE_APE_RX_EN |
4176                                     MAC_MODE_TDE_ENABLE;
4177
4178                 tw32_f(MAC_MODE, mac_mode);
4179                 udelay(100);
4180
4181                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4182                 udelay(10);
4183         }
4184
4185         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4186             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4187              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4188                 u32 base_val;
4189
4190                 base_val = tp->pci_clock_ctrl;
4191                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4192                              CLOCK_CTRL_TXCLK_DISABLE);
4193
4194                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4195                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4196         } else if (tg3_flag(tp, 5780_CLASS) ||
4197                    tg3_flag(tp, CPMU_PRESENT) ||
4198                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4199                 /* do nothing */
4200         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4201                 u32 newbits1, newbits2;
4202
4203                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4204                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4205                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4206                                     CLOCK_CTRL_TXCLK_DISABLE |
4207                                     CLOCK_CTRL_ALTCLK);
4208                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4209                 } else if (tg3_flag(tp, 5705_PLUS)) {
4210                         newbits1 = CLOCK_CTRL_625_CORE;
4211                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4212                 } else {
4213                         newbits1 = CLOCK_CTRL_ALTCLK;
4214                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4215                 }
4216
4217                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4218                             40);
4219
4220                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4221                             40);
4222
4223                 if (!tg3_flag(tp, 5705_PLUS)) {
4224                         u32 newbits3;
4225
4226                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4227                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4228                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4229                                             CLOCK_CTRL_TXCLK_DISABLE |
4230                                             CLOCK_CTRL_44MHZ_CORE);
4231                         } else {
4232                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4233                         }
4234
4235                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4236                                     tp->pci_clock_ctrl | newbits3, 40);
4237                 }
4238         }
4239
4240         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4241                 tg3_power_down_phy(tp, do_low_power);
4242
4243         tg3_frob_aux_power(tp, true);
4244
4245         /* Workaround for unstable PLL clock */
4246         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4247             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4248              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4249                 u32 val = tr32(0x7d00);
4250
4251                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4252                 tw32(0x7d00, val);
4253                 if (!tg3_flag(tp, ENABLE_ASF)) {
4254                         int err;
4255
4256                         err = tg3_nvram_lock(tp);
4257                         tg3_halt_cpu(tp, RX_CPU_BASE);
4258                         if (!err)
4259                                 tg3_nvram_unlock(tp);
4260                 }
4261         }
4262
4263         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4264
4265         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4266
4267         return 0;
4268 }
4269
4270 static void tg3_power_down(struct tg3 *tp)
4271 {
4272         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4273         pci_set_power_state(tp->pdev, PCI_D3hot);
4274 }
4275
4276 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4277 {
4278         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4279         case MII_TG3_AUX_STAT_10HALF:
4280                 *speed = SPEED_10;
4281                 *duplex = DUPLEX_HALF;
4282                 break;
4283
4284         case MII_TG3_AUX_STAT_10FULL:
4285                 *speed = SPEED_10;
4286                 *duplex = DUPLEX_FULL;
4287                 break;
4288
4289         case MII_TG3_AUX_STAT_100HALF:
4290                 *speed = SPEED_100;
4291                 *duplex = DUPLEX_HALF;
4292                 break;
4293
4294         case MII_TG3_AUX_STAT_100FULL:
4295                 *speed = SPEED_100;
4296                 *duplex = DUPLEX_FULL;
4297                 break;
4298
4299         case MII_TG3_AUX_STAT_1000HALF:
4300                 *speed = SPEED_1000;
4301                 *duplex = DUPLEX_HALF;
4302                 break;
4303
4304         case MII_TG3_AUX_STAT_1000FULL:
4305                 *speed = SPEED_1000;
4306                 *duplex = DUPLEX_FULL;
4307                 break;
4308
4309         default:
4310                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4311                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4312                                  SPEED_10;
4313                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4314                                   DUPLEX_HALF;
4315                         break;
4316                 }
4317                 *speed = SPEED_UNKNOWN;
4318                 *duplex = DUPLEX_UNKNOWN;
4319                 break;
4320         }
4321 }
4322
4323 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4324 {
4325         int err = 0;
4326         u32 val, new_adv;
4327
4328         new_adv = ADVERTISE_CSMA;
4329         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4330         new_adv |= mii_advertise_flowctrl(flowctrl);
4331
4332         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4333         if (err)
4334                 goto done;
4335
4336         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4337                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4338
4339                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4340                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4341                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4342
4343                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4344                 if (err)
4345                         goto done;
4346         }
4347
4348         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4349                 goto done;
4350
4351         tw32(TG3_CPMU_EEE_MODE,
4352              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4353
4354         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4355         if (!err) {
4356                 u32 err2;
4357
4358                 val = 0;
4359                 /* Advertise 100-BaseTX EEE ability */
4360                 if (advertise & ADVERTISED_100baseT_Full)
4361                         val |= MDIO_AN_EEE_ADV_100TX;
4362                 /* Advertise 1000-BaseT EEE ability */
4363                 if (advertise & ADVERTISED_1000baseT_Full)
4364                         val |= MDIO_AN_EEE_ADV_1000T;
4365
4366                 if (!tp->eee.eee_enabled) {
4367                         val = 0;
4368                         tp->eee.advertised = 0;
4369                 } else {
4370                         tp->eee.advertised = advertise &
4371                                              (ADVERTISED_100baseT_Full |
4372                                               ADVERTISED_1000baseT_Full);
4373                 }
4374
4375                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4376                 if (err)
4377                         val = 0;
4378
4379                 switch (tg3_asic_rev(tp)) {
4380                 case ASIC_REV_5717:
4381                 case ASIC_REV_57765:
4382                 case ASIC_REV_57766:
4383                 case ASIC_REV_5719:
4384                         /* If we advertised any EEE abilities above... */
4385                         if (val)
4386                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4387                                       MII_TG3_DSP_TAP26_RMRXSTO |
4388                                       MII_TG3_DSP_TAP26_OPCSINPT;
4389                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4390                         fallthrough;
4391                 case ASIC_REV_5720:
4392                 case ASIC_REV_5762:
4393                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4394                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4395                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4396                 }
4397
4398                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4399                 if (!err)
4400                         err = err2;
4401         }
4402
4403 done:
4404         return err;
4405 }
4406
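/* Start link negotiation on a copper PHY.  With autoneg enabled (or in
 * low-power mode) this programs the advertisement and restarts
 * autonegotiation; otherwise it forces speed/duplex via MII_BMCR and
 * waits for the old link to drop first.
 */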
4407 static void tg3_phy_copper_begin(struct tg3 *tp)
4408 {
4409         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4410             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4411                 u32 adv, fc;
4412
4413                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4414                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4415                         adv = ADVERTISED_10baseT_Half |
4416                               ADVERTISED_10baseT_Full;
4417                         if (tg3_flag(tp, WOL_SPEED_100MB))
4418                                 adv |= ADVERTISED_100baseT_Half |
4419                                        ADVERTISED_100baseT_Full;
4420                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4421                                 if (!(tp->phy_flags &
4422                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4423                                         adv |= ADVERTISED_1000baseT_Half;
4424                                 adv |= ADVERTISED_1000baseT_Full;
4425                         }
4426
4427                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4428                 } else {
4429                         adv = tp->link_config.advertising;
4430                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4431                                 adv &= ~(ADVERTISED_1000baseT_Half |
4432                                          ADVERTISED_1000baseT_Full);
4433
4434                         fc = tp->link_config.flowctrl;
4435                 }
4436
4437                 tg3_phy_autoneg_cfg(tp, adv, fc);
4438
4439                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4440                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4441                         /* Normally during power down we want to autonegotiate
4442                          * the lowest possible speed for WOL. However, to avoid
4443                          * link flap, we leave it untouched.
4444                          */
4445                         return;
4446                 }
4447
4448                 tg3_writephy(tp, MII_BMCR,
4449                              BMCR_ANENABLE | BMCR_ANRESTART);
4450         } else {
4451                 int i;
4452                 u32 bmcr, orig_bmcr;
4453
4454                 tp->link_config.active_speed = tp->link_config.speed;
4455                 tp->link_config.active_duplex = tp->link_config.duplex;
4456
4457                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4458                         /* With autoneg disabled, the 5714/5715 only links up when the
4459                          * advertisement register has the configured speed
4460                          * enabled.
4461                          */
4462                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4463                 }
4464
4465                 bmcr = 0;
4466                 switch (tp->link_config.speed) {
4467                 default:
4468                 case SPEED_10:
4469                         break;
4470
4471                 case SPEED_100:
4472                         bmcr |= BMCR_SPEED100;
4473                         break;
4474
4475                 case SPEED_1000:
4476                         bmcr |= BMCR_SPEED1000;
4477                         break;
4478                 }
4479
4480                 if (tp->link_config.duplex == DUPLEX_FULL)
4481                         bmcr |= BMCR_FULLDPLX;
4482
4483                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4484                     (bmcr != orig_bmcr)) {
4485                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4486                         for (i = 0; i < 1500; i++) {
4487                                 u32 tmp;
4488
4489                                 udelay(10);
4490                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4491                                     tg3_readphy(tp, MII_BMSR, &tmp))
4492                                         continue;
4493                                 if (!(tmp & BMSR_LSTATUS)) {
4494                                         udelay(40);
4495                                         break;
4496                                 }
4497                         }
4498                         tg3_writephy(tp, MII_BMCR, bmcr);
4499                         udelay(40);
4500                 }
4501         }
4502 }
4503
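/* Read the configuration currently programmed into the PHY (BMCR and
 * the advertisement registers) back into tp->link_config.
 */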
4504 static int tg3_phy_pull_config(struct tg3 *tp)
4505 {
4506         int err;
4507         u32 val;
4508
4509         err = tg3_readphy(tp, MII_BMCR, &val);
4510         if (err)
4511                 goto done;
4512
4513         if (!(val & BMCR_ANENABLE)) {
4514                 tp->link_config.autoneg = AUTONEG_DISABLE;
4515                 tp->link_config.advertising = 0;
4516                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4517
4518                 err = -EIO;
4519
4520                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4521                 case 0:
4522                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4523                                 goto done;
4524
4525                         tp->link_config.speed = SPEED_10;
4526                         break;
4527                 case BMCR_SPEED100:
4528                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4529                                 goto done;
4530
4531                         tp->link_config.speed = SPEED_100;
4532                         break;
4533                 case BMCR_SPEED1000:
4534                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4535                                 tp->link_config.speed = SPEED_1000;
4536                                 break;
4537                         }
4538                         fallthrough;
4539                 default:
4540                         goto done;
4541                 }
4542
4543                 if (val & BMCR_FULLDPLX)
4544                         tp->link_config.duplex = DUPLEX_FULL;
4545                 else
4546                         tp->link_config.duplex = DUPLEX_HALF;
4547
4548                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4549
4550                 err = 0;
4551                 goto done;
4552         }
4553
4554         tp->link_config.autoneg = AUTONEG_ENABLE;
4555         tp->link_config.advertising = ADVERTISED_Autoneg;
4556         tg3_flag_set(tp, PAUSE_AUTONEG);
4557
4558         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4559                 u32 adv;
4560
4561                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4562                 if (err)
4563                         goto done;
4564
4565                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4566                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4567
4568                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4569         } else {
4570                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4571         }
4572
4573         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4574                 u32 adv;
4575
4576                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4577                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4578                         if (err)
4579                                 goto done;
4580
4581                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4582                 } else {
4583                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4584                         if (err)
4585                                 goto done;
4586
4587                         adv = tg3_decode_flowctrl_1000X(val);
4588                         tp->link_config.flowctrl = adv;
4589
4590                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4591                         adv = mii_adv_to_ethtool_adv_x(val);
4592                 }
4593
4594                 tp->link_config.advertising |= adv;
4595         }
4596
4597 done:
4598         return err;
4599 }
4600
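/* Apply a sequence of undocumented DSP register writes required by the
 * BCM5401 PHY.
 */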
4601 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4602 {
4603         int err;
4604
4605         /* Turn off tap power management. */
4606         /* Set Extended packet length bit */
4607         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4608
4609         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4610         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4611         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4612         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4613         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4614
4615         udelay(40);
4616
4617         return err;
4618 }
4619
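/* Return true if the EEE configuration in the PHY still matches what
 * was last requested in tp->eee; callers treat a mismatch as a reason
 * to reconfigure (and possibly reset) the PHY.
 */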
4620 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4621 {
4622         struct ethtool_eee eee;
4623
4624         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4625                 return true;
4626
4627         tg3_eee_pull_config(tp, &eee);
4628
4629         if (tp->eee.eee_enabled) {
4630                 if (tp->eee.advertised != eee.advertised ||
4631                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4632                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4633                         return false;
4634         } else {
4635                 /* EEE is disabled but we're advertising */
4636                 if (eee.advertised)
4637                         return false;
4638         }
4639
4640         return true;
4641 }
4642
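/* Verify that the advertisement registers in the PHY (MII_ADVERTISE
 * and, on gigabit-capable parts, MII_CTRL1000) still match the desired
 * link configuration.  Returns the local advertisement via @lcladv.
 */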
4643 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4644 {
4645         u32 advmsk, tgtadv, advertising;
4646
4647         advertising = tp->link_config.advertising;
4648         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4649
4650         advmsk = ADVERTISE_ALL;
4651         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4652                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4653                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4654         }
4655
4656         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4657                 return false;
4658
4659         if ((*lcladv & advmsk) != tgtadv)
4660                 return false;
4661
4662         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4663                 u32 tg3_ctrl;
4664
4665                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4666
4667                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4668                         return false;
4669
4670                 if (tgtadv &&
4671                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4672                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4673                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4674                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4675                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4676                 } else {
4677                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4678                 }
4679
4680                 if (tg3_ctrl != tgtadv)
4681                         return false;
4682         }
4683
4684         return true;
4685 }
4686
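/* Fetch the link partner's advertisement (MII_LPA, plus MII_STAT1000 on
 * gigabit-capable parts) and record it in ethtool form in
 * tp->link_config.rmt_adv.
 */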
4687 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4688 {
4689         u32 lpeth = 0;
4690
4691         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4692                 u32 val;
4693
4694                 if (tg3_readphy(tp, MII_STAT1000, &val))
4695                         return false;
4696
4697                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4698         }
4699
4700         if (tg3_readphy(tp, MII_LPA, rmtadv))
4701                 return false;
4702
4703         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4704         tp->link_config.rmt_adv = lpeth;
4705
4706         return true;
4707 }
4708
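/* Tell the net core about a carrier change and log it.  Returns true
 * if the link state actually changed.
 */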
4709 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4710 {
4711         if (curr_link_up != tp->link_up) {
4712                 if (curr_link_up) {
4713                         netif_carrier_on(tp->dev);
4714                 } else {
4715                         netif_carrier_off(tp->dev);
4716                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4717                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4718                 }
4719
4720                 tg3_link_report(tp);
4721                 return true;
4722         }
4723
4724         return false;
4725 }
4726
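/* Mask MAC events and clear any latched link/configuration change bits
 * in MAC_STATUS.
 */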
4727 static void tg3_clear_mac_status(struct tg3 *tp)
4728 {
4729         tw32(MAC_EVENT, 0);
4730
4731         tw32_f(MAC_STATUS,
4732                MAC_STATUS_SYNC_CHANGED |
4733                MAC_STATUS_CFG_CHANGED |
4734                MAC_STATUS_MI_COMPLETION |
4735                MAC_STATUS_LNKSTATE_CHANGED);
4736         udelay(40);
4737 }
4738
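/* Program the CPMU EEE control registers (link-idle detection, EEE mode
 * and the LPI debounce timers) from the settings in tp->eee.
 */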
4739 static void tg3_setup_eee(struct tg3 *tp)
4740 {
4741         u32 val;
4742
4743         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4744               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4745         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4746                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4747
4748         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4749
4750         tw32_f(TG3_CPMU_EEE_CTRL,
4751                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4752
4753         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4754               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4755               TG3_CPMU_EEEMD_LPI_IN_RX |
4756               TG3_CPMU_EEEMD_EEE_ENABLE;
4757
4758         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4759                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4760
4761         if (tg3_flag(tp, ENABLE_APE))
4762                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4763
4764         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4765
4766         tw32_f(TG3_CPMU_EEE_DBTMR1,
4767                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4768                (tp->eee.tx_lpi_timer & 0xffff));
4769
4770         tw32_f(TG3_CPMU_EEE_DBTMR2,
4771                TG3_CPMU_DBTMR2_APE_TX_2047US |
4772                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4773 }
4774
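/* (Re)establish the link on a copper PHY: apply chip-specific
 * workarounds, poll BMSR for link, derive speed/duplex from the aux
 * status register, and program MAC_MODE and flow control to match.
 */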
4775 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4776 {
4777         bool current_link_up;
4778         u32 bmsr, val;
4779         u32 lcl_adv, rmt_adv;
4780         u32 current_speed;
4781         u8 current_duplex;
4782         int i, err;
4783
4784         tg3_clear_mac_status(tp);
4785
4786         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4787                 tw32_f(MAC_MI_MODE,
4788                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4789                 udelay(80);
4790         }
4791
4792         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4793
4794         /* Some third-party PHYs need to be reset on link going
4795          * down.
4796          */
4797         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4798              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4799              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4800             tp->link_up) {
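                /* The link status bit in BMSR is latched-low; read the
                 * register twice so the second read reflects the
                 * current link state.
                 */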
4801                 tg3_readphy(tp, MII_BMSR, &bmsr);
4802                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4803                     !(bmsr & BMSR_LSTATUS))
4804                         force_reset = true;
4805         }
4806         if (force_reset)
4807                 tg3_phy_reset(tp);
4808
4809         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4810                 tg3_readphy(tp, MII_BMSR, &bmsr);
4811                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4812                     !tg3_flag(tp, INIT_COMPLETE))
4813                         bmsr = 0;
4814
4815                 if (!(bmsr & BMSR_LSTATUS)) {
4816                         err = tg3_init_5401phy_dsp(tp);
4817                         if (err)
4818                                 return err;
4819
4820                         tg3_readphy(tp, MII_BMSR, &bmsr);
4821                         for (i = 0; i < 1000; i++) {
4822                                 udelay(10);
4823                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4824                                     (bmsr & BMSR_LSTATUS)) {
4825                                         udelay(40);
4826                                         break;
4827                                 }
4828                         }
4829
4830                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4831                             TG3_PHY_REV_BCM5401_B0 &&
4832                             !(bmsr & BMSR_LSTATUS) &&
4833                             tp->link_config.active_speed == SPEED_1000) {
4834                                 err = tg3_phy_reset(tp);
4835                                 if (!err)
4836                                         err = tg3_init_5401phy_dsp(tp);
4837                                 if (err)
4838                                         return err;
4839                         }
4840                 }
4841         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4842                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4843                 /* 5701 {A0,B0} CRC bug workaround */
4844                 tg3_writephy(tp, 0x15, 0x0a75);
4845                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4846                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4847                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4848         }
4849
4850         /* Clear pending interrupts... */
4851         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4852         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4853
4854         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4855                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4856         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4857                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4858
4859         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4860             tg3_asic_rev(tp) == ASIC_REV_5701) {
4861                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4862                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4863                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4864                 else
4865                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4866         }
4867
4868         current_link_up = false;
4869         current_speed = SPEED_UNKNOWN;
4870         current_duplex = DUPLEX_UNKNOWN;
4871         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4872         tp->link_config.rmt_adv = 0;
4873
4874         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4875                 err = tg3_phy_auxctl_read(tp,
4876                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4877                                           &val);
4878                 if (!err && !(val & (1 << 10))) {
4879                         tg3_phy_auxctl_write(tp,
4880                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4881                                              val | (1 << 10));
4882                         goto relink;
4883                 }
4884         }
4885
4886         bmsr = 0;
4887         for (i = 0; i < 100; i++) {
4888                 tg3_readphy(tp, MII_BMSR, &bmsr);
4889                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4890                     (bmsr & BMSR_LSTATUS))
4891                         break;
4892                 udelay(40);
4893         }
4894
4895         if (bmsr & BMSR_LSTATUS) {
4896                 u32 aux_stat, bmcr;
4897
4898                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4899                 for (i = 0; i < 2000; i++) {
4900                         udelay(10);
4901                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4902                             aux_stat)
4903                                 break;
4904                 }
4905
4906                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4907                                              &current_speed,
4908                                              &current_duplex);
4909
4910                 bmcr = 0;
4911                 for (i = 0; i < 200; i++) {
4912                         tg3_readphy(tp, MII_BMCR, &bmcr);
4913                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4914                                 continue;
4915                         if (bmcr && bmcr != 0x7fff)
4916                                 break;
4917                         udelay(10);
4918                 }
4919
4920                 lcl_adv = 0;
4921                 rmt_adv = 0;
4922
4923                 tp->link_config.active_speed = current_speed;
4924                 tp->link_config.active_duplex = current_duplex;
4925
4926                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4927                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4928
4929                         if ((bmcr & BMCR_ANENABLE) &&
4930                             eee_config_ok &&
4931                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4932                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4933                                 current_link_up = true;
4934
4935                         /* EEE settings changes take effect only after a phy
4936                          * reset.  If we have skipped a reset due to Link Flap
4937                          * Avoidance being enabled, do it now.
4938                          */
4939                         if (!eee_config_ok &&
4940                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4941                             !force_reset) {
4942                                 tg3_setup_eee(tp);
4943                                 tg3_phy_reset(tp);
4944                         }
4945                 } else {
4946                         if (!(bmcr & BMCR_ANENABLE) &&
4947                             tp->link_config.speed == current_speed &&
4948                             tp->link_config.duplex == current_duplex) {
4949                                 current_link_up = true;
4950                         }
4951                 }
4952
4953                 if (current_link_up &&
4954                     tp->link_config.active_duplex == DUPLEX_FULL) {
4955                         u32 reg, bit;
4956
4957                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4958                                 reg = MII_TG3_FET_GEN_STAT;
4959                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4960                         } else {
4961                                 reg = MII_TG3_EXT_STAT;
4962                                 bit = MII_TG3_EXT_STAT_MDIX;
4963                         }
4964
4965                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4966                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4967
4968                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4969                 }
4970         }
4971
4972 relink:
4973         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4974                 tg3_phy_copper_begin(tp);
4975
4976                 if (tg3_flag(tp, ROBOSWITCH)) {
4977                         current_link_up = true;
4978                         /* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4979                         current_speed = SPEED_1000;
4980                         current_duplex = DUPLEX_FULL;
4981                         tp->link_config.active_speed = current_speed;
4982                         tp->link_config.active_duplex = current_duplex;
4983                 }
4984
4985                 tg3_readphy(tp, MII_BMSR, &bmsr);
4986                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4987                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4988                         current_link_up = true;
4989         }
4990
4991         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4992         if (current_link_up) {
4993                 if (tp->link_config.active_speed == SPEED_100 ||
4994                     tp->link_config.active_speed == SPEED_10)
4995                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4996                 else
4997                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4998         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4999                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5000         else
5001                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5002
5003         /* In order for the 5750 core in the BCM4785 chip to work properly
5004          * in RGMII mode, the LED Control Register must be set up.
5005          */
5006         if (tg3_flag(tp, RGMII_MODE)) {
5007                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5008                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5009
5010                 if (tp->link_config.active_speed == SPEED_10)
5011                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5012                 else if (tp->link_config.active_speed == SPEED_100)
5013                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5014                                      LED_CTRL_100MBPS_ON);
5015                 else if (tp->link_config.active_speed == SPEED_1000)
5016                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5017                                      LED_CTRL_1000MBPS_ON);
5018
5019                 tw32(MAC_LED_CTRL, led_ctrl);
5020                 udelay(40);
5021         }
5022
5023         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5024         if (tp->link_config.active_duplex == DUPLEX_HALF)
5025                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5026
5027         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5028                 if (current_link_up &&
5029                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5030                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5031                 else
5032                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5033         }
5034
5035         /* ??? Without this setting Netgear GA302T PHY does not
5036          * ??? send/receive packets...
5037          */
5038         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5039             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5040                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5041                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5042                 udelay(80);
5043         }
5044
5045         tw32_f(MAC_MODE, tp->mac_mode);
5046         udelay(40);
5047
5048         tg3_phy_eee_adjust(tp, current_link_up);
5049
5050         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5051                 /* Polled via timer. */
5052                 tw32_f(MAC_EVENT, 0);
5053         } else {
5054                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5055         }
5056         udelay(40);
5057
5058         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5059             current_link_up &&
5060             tp->link_config.active_speed == SPEED_1000 &&
5061             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5062                 udelay(120);
5063                 tw32_f(MAC_STATUS,
5064                      (MAC_STATUS_SYNC_CHANGED |
5065                       MAC_STATUS_CFG_CHANGED));
5066                 udelay(40);
5067                 tg3_write_mem(tp,
5068                               NIC_SRAM_FIRMWARE_MBOX,
5069                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5070         }
5071
5072         /* Prevent send BD corruption. */
5073         if (tg3_flag(tp, CLKREQ_BUG)) {
5074                 if (tp->link_config.active_speed == SPEED_100 ||
5075                     tp->link_config.active_speed == SPEED_10)
5076                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5077                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5078                 else
5079                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5080                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5081         }
5082
5083         tg3_test_and_report_link_chg(tp, current_link_up);
5084
5085         return 0;
5086 }
5087
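/* Bookkeeping for the software 1000BASE-X autonegotiation state machine
 * used on fiber links when the hardware autoneg engine is not in use.
 */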
5088 struct tg3_fiber_aneginfo {
5089         int state;
5090 #define ANEG_STATE_UNKNOWN              0
5091 #define ANEG_STATE_AN_ENABLE            1
5092 #define ANEG_STATE_RESTART_INIT         2
5093 #define ANEG_STATE_RESTART              3
5094 #define ANEG_STATE_DISABLE_LINK_OK      4
5095 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5096 #define ANEG_STATE_ABILITY_DETECT       6
5097 #define ANEG_STATE_ACK_DETECT_INIT      7
5098 #define ANEG_STATE_ACK_DETECT           8
5099 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5100 #define ANEG_STATE_COMPLETE_ACK         10
5101 #define ANEG_STATE_IDLE_DETECT_INIT     11
5102 #define ANEG_STATE_IDLE_DETECT          12
5103 #define ANEG_STATE_LINK_OK              13
5104 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5105 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5106
5107         u32 flags;
5108 #define MR_AN_ENABLE            0x00000001
5109 #define MR_RESTART_AN           0x00000002
5110 #define MR_AN_COMPLETE          0x00000004
5111 #define MR_PAGE_RX              0x00000008
5112 #define MR_NP_LOADED            0x00000010
5113 #define MR_TOGGLE_TX            0x00000020
5114 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5115 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5116 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5117 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5118 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5119 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5120 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5121 #define MR_TOGGLE_RX            0x00002000
5122 #define MR_NP_RX                0x00004000
5123
5124 #define MR_LINK_OK              0x80000000
5125
5126         unsigned long link_time, cur_time;
5127
5128         u32 ability_match_cfg;
5129         int ability_match_count;
5130
5131         char ability_match, idle_match, ack_match;
5132
5133         u32 txconfig, rxconfig;
5134 #define ANEG_CFG_NP             0x00000080
5135 #define ANEG_CFG_ACK            0x00000040
5136 #define ANEG_CFG_RF2            0x00000020
5137 #define ANEG_CFG_RF1            0x00000010
5138 #define ANEG_CFG_PS2            0x00000001
5139 #define ANEG_CFG_PS1            0x00008000
5140 #define ANEG_CFG_HD             0x00004000
5141 #define ANEG_CFG_FD             0x00002000
5142 #define ANEG_CFG_INVAL          0x00001f06
5143
5144 };
5145 #define ANEG_OK         0
5146 #define ANEG_DONE       1
5147 #define ANEG_TIMER_ENAB 2
5148 #define ANEG_FAILED     -1
5149
5150 #define ANEG_STATE_SETTLE_TIME  10000
5151
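/* One step of the software fiber autoneg state machine.  The flow
 * (ability detect, ack detect, complete ack, idle detect) loosely
 * follows the IEEE 802.3 clause 37 arbitration state diagram.
 */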
5152 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5153                                    struct tg3_fiber_aneginfo *ap)
5154 {
5155         u16 flowctrl;
5156         unsigned long delta;
5157         u32 rx_cfg_reg;
5158         int ret;
5159
5160         if (ap->state == ANEG_STATE_UNKNOWN) {
5161                 ap->rxconfig = 0;
5162                 ap->link_time = 0;
5163                 ap->cur_time = 0;
5164                 ap->ability_match_cfg = 0;
5165                 ap->ability_match_count = 0;
5166                 ap->ability_match = 0;
5167                 ap->idle_match = 0;
5168                 ap->ack_match = 0;
5169         }
5170         ap->cur_time++;
5171
5172         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5173                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5174
5175                 if (rx_cfg_reg != ap->ability_match_cfg) {
5176                         ap->ability_match_cfg = rx_cfg_reg;
5177                         ap->ability_match = 0;
5178                         ap->ability_match_count = 0;
5179                 } else {
5180                         if (++ap->ability_match_count > 1) {
5181                                 ap->ability_match = 1;
5182                                 ap->ability_match_cfg = rx_cfg_reg;
5183                         }
5184                 }
5185                 if (rx_cfg_reg & ANEG_CFG_ACK)
5186                         ap->ack_match = 1;
5187                 else
5188                         ap->ack_match = 0;
5189
5190                 ap->idle_match = 0;
5191         } else {
5192                 ap->idle_match = 1;
5193                 ap->ability_match_cfg = 0;
5194                 ap->ability_match_count = 0;
5195                 ap->ability_match = 0;
5196                 ap->ack_match = 0;
5197
5198                 rx_cfg_reg = 0;
5199         }
5200
5201         ap->rxconfig = rx_cfg_reg;
5202         ret = ANEG_OK;
5203
5204         switch (ap->state) {
5205         case ANEG_STATE_UNKNOWN:
5206                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5207                         ap->state = ANEG_STATE_AN_ENABLE;
5208
5209                 fallthrough;
5210         case ANEG_STATE_AN_ENABLE:
5211                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5212                 if (ap->flags & MR_AN_ENABLE) {
5213                         ap->link_time = 0;
5214                         ap->cur_time = 0;
5215                         ap->ability_match_cfg = 0;
5216                         ap->ability_match_count = 0;
5217                         ap->ability_match = 0;
5218                         ap->idle_match = 0;
5219                         ap->ack_match = 0;
5220
5221                         ap->state = ANEG_STATE_RESTART_INIT;
5222                 } else {
5223                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5224                 }
5225                 break;
5226
5227         case ANEG_STATE_RESTART_INIT:
5228                 ap->link_time = ap->cur_time;
5229                 ap->flags &= ~(MR_NP_LOADED);
5230                 ap->txconfig = 0;
5231                 tw32(MAC_TX_AUTO_NEG, 0);
5232                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5233                 tw32_f(MAC_MODE, tp->mac_mode);
5234                 udelay(40);
5235
5236                 ret = ANEG_TIMER_ENAB;
5237                 ap->state = ANEG_STATE_RESTART;
5238
5239                 fallthrough;
5240         case ANEG_STATE_RESTART:
5241                 delta = ap->cur_time - ap->link_time;
5242                 if (delta > ANEG_STATE_SETTLE_TIME)
5243                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5244                 else
5245                         ret = ANEG_TIMER_ENAB;
5246                 break;
5247
5248         case ANEG_STATE_DISABLE_LINK_OK:
5249                 ret = ANEG_DONE;
5250                 break;
5251
5252         case ANEG_STATE_ABILITY_DETECT_INIT:
5253                 ap->flags &= ~(MR_TOGGLE_TX);
5254                 ap->txconfig = ANEG_CFG_FD;
5255                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5256                 if (flowctrl & ADVERTISE_1000XPAUSE)
5257                         ap->txconfig |= ANEG_CFG_PS1;
5258                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5259                         ap->txconfig |= ANEG_CFG_PS2;
5260                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5261                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5262                 tw32_f(MAC_MODE, tp->mac_mode);
5263                 udelay(40);
5264
5265                 ap->state = ANEG_STATE_ABILITY_DETECT;
5266                 break;
5267
5268         case ANEG_STATE_ABILITY_DETECT:
5269                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5270                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5271                 break;
5272
5273         case ANEG_STATE_ACK_DETECT_INIT:
5274                 ap->txconfig |= ANEG_CFG_ACK;
5275                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5276                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5277                 tw32_f(MAC_MODE, tp->mac_mode);
5278                 udelay(40);
5279
5280                 ap->state = ANEG_STATE_ACK_DETECT;
5281
5282                 fallthrough;
5283         case ANEG_STATE_ACK_DETECT:
5284                 if (ap->ack_match != 0) {
5285                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5286                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5287                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5288                         } else {
5289                                 ap->state = ANEG_STATE_AN_ENABLE;
5290                         }
5291                 } else if (ap->ability_match != 0 &&
5292                            ap->rxconfig == 0) {
5293                         ap->state = ANEG_STATE_AN_ENABLE;
5294                 }
5295                 break;
5296
5297         case ANEG_STATE_COMPLETE_ACK_INIT:
5298                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5299                         ret = ANEG_FAILED;
5300                         break;
5301                 }
5302                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5303                                MR_LP_ADV_HALF_DUPLEX |
5304                                MR_LP_ADV_SYM_PAUSE |
5305                                MR_LP_ADV_ASYM_PAUSE |
5306                                MR_LP_ADV_REMOTE_FAULT1 |
5307                                MR_LP_ADV_REMOTE_FAULT2 |
5308                                MR_LP_ADV_NEXT_PAGE |
5309                                MR_TOGGLE_RX |
5310                                MR_NP_RX);
5311                 if (ap->rxconfig & ANEG_CFG_FD)
5312                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5313                 if (ap->rxconfig & ANEG_CFG_HD)
5314                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5315                 if (ap->rxconfig & ANEG_CFG_PS1)
5316                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5317                 if (ap->rxconfig & ANEG_CFG_PS2)
5318                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5319                 if (ap->rxconfig & ANEG_CFG_RF1)
5320                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5321                 if (ap->rxconfig & ANEG_CFG_RF2)
5322                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5323                 if (ap->rxconfig & ANEG_CFG_NP)
5324                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5325
5326                 ap->link_time = ap->cur_time;
5327
5328                 ap->flags ^= (MR_TOGGLE_TX);
5329                 if (ap->rxconfig & 0x0008)
5330                         ap->flags |= MR_TOGGLE_RX;
5331                 if (ap->rxconfig & ANEG_CFG_NP)
5332                         ap->flags |= MR_NP_RX;
5333                 ap->flags |= MR_PAGE_RX;
5334
5335                 ap->state = ANEG_STATE_COMPLETE_ACK;
5336                 ret = ANEG_TIMER_ENAB;
5337                 break;
5338
5339         case ANEG_STATE_COMPLETE_ACK:
5340                 if (ap->ability_match != 0 &&
5341                     ap->rxconfig == 0) {
5342                         ap->state = ANEG_STATE_AN_ENABLE;
5343                         break;
5344                 }
5345                 delta = ap->cur_time - ap->link_time;
5346                 if (delta > ANEG_STATE_SETTLE_TIME) {
5347                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5348                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5349                         } else {
5350                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5351                                     !(ap->flags & MR_NP_RX)) {
5352                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5353                                 } else {
5354                                         ret = ANEG_FAILED;
5355                                 }
5356                         }
5357                 }
5358                 break;
5359
5360         case ANEG_STATE_IDLE_DETECT_INIT:
5361                 ap->link_time = ap->cur_time;
5362                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5363                 tw32_f(MAC_MODE, tp->mac_mode);
5364                 udelay(40);
5365
5366                 ap->state = ANEG_STATE_IDLE_DETECT;
5367                 ret = ANEG_TIMER_ENAB;
5368                 break;
5369
5370         case ANEG_STATE_IDLE_DETECT:
5371                 if (ap->ability_match != 0 &&
5372                     ap->rxconfig == 0) {
5373                         ap->state = ANEG_STATE_AN_ENABLE;
5374                         break;
5375                 }
5376                 delta = ap->cur_time - ap->link_time;
5377                 if (delta > ANEG_STATE_SETTLE_TIME) {
5378                         /* XXX another gem from the Broadcom driver :( */
5379                         ap->state = ANEG_STATE_LINK_OK;
5380                 }
5381                 break;
5382
5383         case ANEG_STATE_LINK_OK:
5384                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5385                 ret = ANEG_DONE;
5386                 break;
5387
5388         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5389                 /* ??? unimplemented */
5390                 break;
5391
5392         case ANEG_STATE_NEXT_PAGE_WAIT:
5393                 /* ??? unimplemented */
5394                 break;
5395
5396         default:
5397                 ret = ANEG_FAILED;
5398                 break;
5399         }
5400
5401         return ret;
5402 }
5403
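/* Run the software fiber autoneg state machine to completion, one tick
 * per microsecond, bounded at roughly 195 ms.  Returns nonzero on a
 * successful negotiation and reports the tx/rx config words.
 */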
5404 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5405 {
5406         int res = 0;
5407         struct tg3_fiber_aneginfo aninfo;
5408         int status = ANEG_FAILED;
5409         unsigned int tick;
5410         u32 tmp;
5411
5412         tw32_f(MAC_TX_AUTO_NEG, 0);
5413
5414         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5415         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5416         udelay(40);
5417
5418         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5419         udelay(40);
5420
5421         memset(&aninfo, 0, sizeof(aninfo));
5422         aninfo.flags |= MR_AN_ENABLE;
5423         aninfo.state = ANEG_STATE_UNKNOWN;
5424         aninfo.cur_time = 0;
5425         tick = 0;
5426         while (++tick < 195000) {
5427                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5428                 if (status == ANEG_DONE || status == ANEG_FAILED)
5429                         break;
5430
5431                 udelay(1);
5432         }
5433
5434         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5435         tw32_f(MAC_MODE, tp->mac_mode);
5436         udelay(40);
5437
5438         *txflags = aninfo.txconfig;
5439         *rxflags = aninfo.flags;
5440
5441         if (status == ANEG_DONE &&
5442             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5443                              MR_LP_ADV_FULL_DUPLEX)))
5444                 res = 1;
5445
5446         return res;
5447 }
5448
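/* Bring-up sequence for the BCM8002 SerDes PHY, consisting mostly of
 * undocumented register writes.
 */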
5449 static void tg3_init_bcm8002(struct tg3 *tp)
5450 {
5451         u32 mac_status = tr32(MAC_STATUS);
5452         int i;
5453
5454         /* Reset when initializing for the first time, or when we have a link. */
5455         if (tg3_flag(tp, INIT_COMPLETE) &&
5456             !(mac_status & MAC_STATUS_PCS_SYNCED))
5457                 return;
5458
5459         /* Set PLL lock range. */
5460         tg3_writephy(tp, 0x16, 0x8007);
5461
5462         /* SW reset */
5463         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5464
5465         /* Wait for reset to complete. */
5466         /* XXX schedule_timeout() ... */
5467         for (i = 0; i < 500; i++)
5468                 udelay(10);
5469
5470         /* Config mode; select PMA/Ch 1 regs. */
5471         tg3_writephy(tp, 0x10, 0x8411);
5472
5473         /* Enable auto-lock and comdet, select txclk for tx. */
5474         tg3_writephy(tp, 0x11, 0x0a10);
5475
5476         tg3_writephy(tp, 0x18, 0x00a0);
5477         tg3_writephy(tp, 0x16, 0x41ff);
5478
5479         /* Assert and deassert POR. */
5480         tg3_writephy(tp, 0x13, 0x0400);
5481         udelay(40);
5482         tg3_writephy(tp, 0x13, 0x0000);
5483
5484         tg3_writephy(tp, 0x11, 0x0a50);
5485         udelay(40);
5486         tg3_writephy(tp, 0x11, 0x0a10);
5487
5488         /* Wait for signal to stabilize */
5489         /* XXX schedule_timeout() ... */
5490         for (i = 0; i < 15000; i++)
5491                 udelay(10);
5492
5493         /* Deselect the channel register so we can read the PHYID
5494          * later.
5495          */
5496         tg3_writephy(tp, 0x10, 0x8011);
5497 }
5498
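/* Drive the SG_DIG hardware autoneg block on fiber links: program the
 * expected control word, watch SG_DIG_STATUS for completion, and fall
 * back to parallel detection when the partner does not autonegotiate.
 */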
5499 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5500 {
5501         u16 flowctrl;
5502         bool current_link_up;
5503         u32 sg_dig_ctrl, sg_dig_status;
5504         u32 serdes_cfg, expected_sg_dig_ctrl;
5505         int workaround, port_a;
5506
5507         serdes_cfg = 0;
5508         workaround = 0;
5509         port_a = 1;
5510         current_link_up = false;
5511
5512         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5513             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5514                 workaround = 1;
5515                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5516                         port_a = 0;
5517
5518                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5519                 /* preserve bits 20-23 for voltage regulator */
5520                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5521         }
5522
5523         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5524
5525         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5526                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5527                         if (workaround) {
5528                                 u32 val = serdes_cfg;
5529
5530                                 if (port_a)
5531                                         val |= 0xc010000;
5532                                 else
5533                                         val |= 0x4010000;
5534                                 tw32_f(MAC_SERDES_CFG, val);
5535                         }
5536
5537                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5538                 }
5539                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5540                         tg3_setup_flow_control(tp, 0, 0);
5541                         current_link_up = true;
5542                 }
5543                 goto out;
5544         }
5545
5546         /* Want auto-negotiation.  */
5547         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5548
5549         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5550         if (flowctrl & ADVERTISE_1000XPAUSE)
5551                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5552         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5553                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5554
5555         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5556                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5557                     tp->serdes_counter &&
5558                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5559                                     MAC_STATUS_RCVD_CFG)) ==
5560                      MAC_STATUS_PCS_SYNCED)) {
5561                         tp->serdes_counter--;
5562                         current_link_up = true;
5563                         goto out;
5564                 }
5565 restart_autoneg:
5566                 if (workaround)
5567                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5568                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5569                 udelay(5);
5570                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5571
5572                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5573                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5574         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5575                                  MAC_STATUS_SIGNAL_DET)) {
5576                 sg_dig_status = tr32(SG_DIG_STATUS);
5577                 mac_status = tr32(MAC_STATUS);
5578
5579                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5580                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5581                         u32 local_adv = 0, remote_adv = 0;
5582
5583                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5584                                 local_adv |= ADVERTISE_1000XPAUSE;
5585                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5586                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5587
5588                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5589                                 remote_adv |= LPA_1000XPAUSE;
5590                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5591                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5592
5593                         tp->link_config.rmt_adv =
5594                                            mii_adv_to_ethtool_adv_x(remote_adv);
5595
5596                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5597                         current_link_up = true;
5598                         tp->serdes_counter = 0;
5599                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5600                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5601                         if (tp->serdes_counter)
5602                                 tp->serdes_counter--;
5603                         else {
5604                                 if (workaround) {
5605                                         u32 val = serdes_cfg;
5606
5607                                         if (port_a)
5608                                                 val |= 0xc010000;
5609                                         else
5610                                                 val |= 0x4010000;
5611
5612                                         tw32_f(MAC_SERDES_CFG, val);
5613                                 }
5614
5615                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5616                                 udelay(40);
5617
5618                                 /* Link parallel detection: link is up only
5619                                  * if we have PCS_SYNC and are not
5620                                  * receiving config code words. */
5621                                 mac_status = tr32(MAC_STATUS);
5622                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5623                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5624                                         tg3_setup_flow_control(tp, 0, 0);
5625                                         current_link_up = true;
5626                                         tp->phy_flags |=
5627                                                 TG3_PHYFLG_PARALLEL_DETECT;
5628                                         tp->serdes_counter =
5629                                                 SERDES_PARALLEL_DET_TIMEOUT;
5630                                 } else
5631                                         goto restart_autoneg;
5632                         }
5633                 }
5634         } else {
5635                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5636                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5637         }
5638
5639 out:
5640         return current_link_up;
5641 }
5642
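/* Fiber link setup without the SG_DIG block: run the software autoneg
 * state machine, or simply force a 1000 Mbps full-duplex link when
 * autoneg is disabled.
 */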
5643 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5644 {
5645         bool current_link_up = false;
5646
5647         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5648                 goto out;
5649
5650         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5651                 u32 txflags, rxflags;
5652                 int i;
5653
5654                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5655                         u32 local_adv = 0, remote_adv = 0;
5656
5657                         if (txflags & ANEG_CFG_PS1)
5658                                 local_adv |= ADVERTISE_1000XPAUSE;
5659                         if (txflags & ANEG_CFG_PS2)
5660                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5661
5662                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5663                                 remote_adv |= LPA_1000XPAUSE;
5664                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5665                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5666
5667                         tp->link_config.rmt_adv =
5668                                            mii_adv_to_ethtool_adv_x(remote_adv);
5669
5670                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5671
5672                         current_link_up = true;
5673                 }
5674                 for (i = 0; i < 30; i++) {
5675                         udelay(20);
5676                         tw32_f(MAC_STATUS,
5677                                (MAC_STATUS_SYNC_CHANGED |
5678                                 MAC_STATUS_CFG_CHANGED));
5679                         udelay(40);
5680                         if ((tr32(MAC_STATUS) &
5681                              (MAC_STATUS_SYNC_CHANGED |
5682                               MAC_STATUS_CFG_CHANGED)) == 0)
5683                                 break;
5684                 }
5685
5686                 mac_status = tr32(MAC_STATUS);
5687                 if (!current_link_up &&
5688                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5689                     !(mac_status & MAC_STATUS_RCVD_CFG))
5690                         current_link_up = true;
5691         } else {
5692                 tg3_setup_flow_control(tp, 0, 0);
5693
5694                 /* Forcing 1000FD link up. */
5695                 current_link_up = true;
5696
5697                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5698                 udelay(40);
5699
5700                 tw32_f(MAC_MODE, tp->mac_mode);
5701                 udelay(40);
5702         }
5703
5704 out:
5705         return current_link_up;
5706 }
5707
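/* Top-level link setup for TBI (fiber) ports.  Selects hardware or
 * software autonegotiation, then reports any resulting link change.
 */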
5708 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5709 {
5710         u32 orig_pause_cfg;
5711         u32 orig_active_speed;
5712         u8 orig_active_duplex;
5713         u32 mac_status;
5714         bool current_link_up;
5715         int i;
5716
5717         orig_pause_cfg = tp->link_config.active_flowctrl;
5718         orig_active_speed = tp->link_config.active_speed;
5719         orig_active_duplex = tp->link_config.active_duplex;
5720
5721         if (!tg3_flag(tp, HW_AUTONEG) &&
5722             tp->link_up &&
5723             tg3_flag(tp, INIT_COMPLETE)) {
5724                 mac_status = tr32(MAC_STATUS);
5725                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5726                                MAC_STATUS_SIGNAL_DET |
5727                                MAC_STATUS_CFG_CHANGED |
5728                                MAC_STATUS_RCVD_CFG);
5729                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5730                                    MAC_STATUS_SIGNAL_DET)) {
5731                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5732                                             MAC_STATUS_CFG_CHANGED));
5733                         return 0;
5734                 }
5735         }
5736
5737         tw32_f(MAC_TX_AUTO_NEG, 0);
5738
5739         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5740         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5741         tw32_f(MAC_MODE, tp->mac_mode);
5742         udelay(40);
5743
5744         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5745                 tg3_init_bcm8002(tp);
5746
5747         /* Enable link change events even when polling the serdes. */
5748         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5749         udelay(40);
5750
5751         tp->link_config.rmt_adv = 0;
5752         mac_status = tr32(MAC_STATUS);
5753
5754         if (tg3_flag(tp, HW_AUTONEG))
5755                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5756         else
5757                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5758
5759         tp->napi[0].hw_status->status =
5760                 (SD_STATUS_UPDATED |
5761                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5762
5763         for (i = 0; i < 100; i++) {
5764                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5765                                     MAC_STATUS_CFG_CHANGED));
5766                 udelay(5);
5767                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5768                                          MAC_STATUS_CFG_CHANGED |
5769                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5770                         break;
5771         }
5772
5773         mac_status = tr32(MAC_STATUS);
5774         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5775                 current_link_up = false;
5776                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5777                     tp->serdes_counter == 0) {
5778                         tw32_f(MAC_MODE, (tp->mac_mode |
5779                                           MAC_MODE_SEND_CONFIGS));
5780                         udelay(1);
5781                         tw32_f(MAC_MODE, tp->mac_mode);
5782                 }
5783         }
5784
5785         if (current_link_up) {
5786                 tp->link_config.active_speed = SPEED_1000;
5787                 tp->link_config.active_duplex = DUPLEX_FULL;
5788                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5789                                     LED_CTRL_LNKLED_OVERRIDE |
5790                                     LED_CTRL_1000MBPS_ON));
5791         } else {
5792                 tp->link_config.active_speed = SPEED_UNKNOWN;
5793                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5794                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5795                                     LED_CTRL_LNKLED_OVERRIDE |
5796                                     LED_CTRL_TRAFFIC_OVERRIDE));
5797         }
5798
5799         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5800                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5801                 if (orig_pause_cfg != now_pause_cfg ||
5802                     orig_active_speed != tp->link_config.active_speed ||
5803                     orig_active_duplex != tp->link_config.active_duplex)
5804                         tg3_link_report(tp);
5805         }
5806
5807         return 0;
5808 }
5809
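/* Link setup for serdes devices with an MII-style register interface.
 * On 5719/5720 parts in SGMII mode the result is read directly from
 * SERDES_TG3_1000X_STATUS; everything else runs the usual BMCR/BMSR
 * autoneg sequence, with the 5714's link bit patched up from
 * MAC_TX_STATUS.
 */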
5810 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5811 {
5812         int err = 0;
5813         u32 bmsr, bmcr;
5814         u32 current_speed = SPEED_UNKNOWN;
5815         u8 current_duplex = DUPLEX_UNKNOWN;
5816         bool current_link_up = false;
5817         u32 local_adv, remote_adv, sgsr;
5818
5819         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5820              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5821              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5822              (sgsr & SERDES_TG3_SGMII_MODE)) {
5823
5824                 if (force_reset)
5825                         tg3_phy_reset(tp);
5826
5827                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5828
5829                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5830                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5831                 } else {
5832                         current_link_up = true;
5833                         if (sgsr & SERDES_TG3_SPEED_1000) {
5834                                 current_speed = SPEED_1000;
5835                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5836                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5837                                 current_speed = SPEED_100;
5838                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5839                         } else {
5840                                 current_speed = SPEED_10;
5841                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5842                         }
5843
5844                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5845                                 current_duplex = DUPLEX_FULL;
5846                         else
5847                                 current_duplex = DUPLEX_HALF;
5848                 }
5849
5850                 tw32_f(MAC_MODE, tp->mac_mode);
5851                 udelay(40);
5852
5853                 tg3_clear_mac_status(tp);
5854
5855                 goto fiber_setup_done;
5856         }
5857
5858         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5859         tw32_f(MAC_MODE, tp->mac_mode);
5860         udelay(40);
5861
5862         tg3_clear_mac_status(tp);
5863
5864         if (force_reset)
5865                 tg3_phy_reset(tp);
5866
5867         tp->link_config.rmt_adv = 0;
5868
5869         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5871         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5872                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5873                         bmsr |= BMSR_LSTATUS;
5874                 else
5875                         bmsr &= ~BMSR_LSTATUS;
5876         }
5877
5878         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5879
5880         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5881             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5882                 /* do nothing, just check for link up at the end */
5883         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5884                 u32 adv, newadv;
5885
5886                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5887                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5888                                  ADVERTISE_1000XPAUSE |
5889                                  ADVERTISE_1000XPSE_ASYM |
5890                                  ADVERTISE_SLCT);
5891
5892                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5893                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5894
5895                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5896                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5897                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5898                         tg3_writephy(tp, MII_BMCR, bmcr);
5899
5900                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5901                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5902                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5903
5904                         return err;
5905                 }
5906         } else {
5907                 u32 new_bmcr;
5908
5909                 bmcr &= ~BMCR_SPEED1000;
5910                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5911
5912                 if (tp->link_config.duplex == DUPLEX_FULL)
5913                         new_bmcr |= BMCR_FULLDPLX;
5914
5915                 if (new_bmcr != bmcr) {
5916                         /* BMCR_SPEED1000 is a reserved bit that needs
5917                          * to be set on write.
5918                          */
5919                         new_bmcr |= BMCR_SPEED1000;
5920
5921                         /* Force a linkdown */
5922                         if (tp->link_up) {
5923                                 u32 adv;
5924
5925                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5926                                 adv &= ~(ADVERTISE_1000XFULL |
5927                                          ADVERTISE_1000XHALF |
5928                                          ADVERTISE_SLCT);
5929                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5930                                 tg3_writephy(tp, MII_BMCR, bmcr |
5931                                                            BMCR_ANRESTART |
5932                                                            BMCR_ANENABLE);
5933                                 udelay(10);
5934                                 tg3_carrier_off(tp);
5935                         }
5936                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5937                         bmcr = new_bmcr;
5938                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5940                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5941                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5942                                         bmsr |= BMSR_LSTATUS;
5943                                 else
5944                                         bmsr &= ~BMSR_LSTATUS;
5945                         }
5946                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5947                 }
5948         }
5949
5950         if (bmsr & BMSR_LSTATUS) {
5951                 current_speed = SPEED_1000;
5952                 current_link_up = true;
5953                 if (bmcr & BMCR_FULLDPLX)
5954                         current_duplex = DUPLEX_FULL;
5955                 else
5956                         current_duplex = DUPLEX_HALF;
5957
5958                 local_adv = 0;
5959                 remote_adv = 0;
5960
5961                 if (bmcr & BMCR_ANENABLE) {
5962                         u32 common;
5963
5964                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5965                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5966                         common = local_adv & remote_adv;
5967                         if (common & (ADVERTISE_1000XHALF |
5968                                       ADVERTISE_1000XFULL)) {
5969                                 if (common & ADVERTISE_1000XFULL)
5970                                         current_duplex = DUPLEX_FULL;
5971                                 else
5972                                         current_duplex = DUPLEX_HALF;
5973
5974                                 tp->link_config.rmt_adv =
5975                                            mii_adv_to_ethtool_adv_x(remote_adv);
5976                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5977                                 /* Link is up via parallel detect */
5978                         } else {
5979                                 current_link_up = false;
5980                         }
5981                 }
5982         }
5983
5984 fiber_setup_done:
5985         if (current_link_up && current_duplex == DUPLEX_FULL)
5986                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5987
5988         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5989         if (tp->link_config.active_duplex == DUPLEX_HALF)
5990                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5991
5992         tw32_f(MAC_MODE, tp->mac_mode);
5993         udelay(40);
5994
5995         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5996
5997         tp->link_config.active_speed = current_speed;
5998         tp->link_config.active_duplex = current_duplex;
5999
6000         tg3_test_and_report_link_chg(tp, current_link_up);
6001         return err;
6002 }
6003
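/* Periodic helper implementing parallel detection on serdes links.
 * Once serdes_counter has expired with autoneg enabled but no link, a
 * PHY that shows signal detect without incoming config code words is
 * assumed to face a non-autonegotiating partner, so the link is forced
 * up at 1000 Mbps full duplex.  If config code words show up later,
 * autoneg is switched back on.
 */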
6004 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6005 {
6006         if (tp->serdes_counter) {
6007                 /* Give autoneg time to complete. */
6008                 tp->serdes_counter--;
6009                 return;
6010         }
6011
6012         if (!tp->link_up &&
6013             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6014                 u32 bmcr;
6015
6016                 tg3_readphy(tp, MII_BMCR, &bmcr);
6017                 if (bmcr & BMCR_ANENABLE) {
6018                         u32 phy1, phy2;
6019
6020                         /* Select shadow register 0x1f */
6021                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6022                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6023
6024                         /* Select expansion interrupt status register */
6025                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6026                                          MII_TG3_DSP_EXP1_INT_STAT);
6027                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6029
6030                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6031                                 /* We have signal detect but are not
6032                                  * receiving config code words; the link
6033                                  * is up via parallel detection.
6034                                  */
6035
6036                                 bmcr &= ~BMCR_ANENABLE;
6037                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6038                                 tg3_writephy(tp, MII_BMCR, bmcr);
6039                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6040                         }
6041                 }
6042         } else if (tp->link_up &&
6043                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6044                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6045                 u32 phy2;
6046
6047                 /* Select expansion interrupt status register */
6048                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6049                                  MII_TG3_DSP_EXP1_INT_STAT);
6050                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6051                 if (phy2 & 0x20) {
6052                         u32 bmcr;
6053
6054                         /* Config code words received, turn on autoneg. */
6055                         tg3_readphy(tp, MII_BMCR, &bmcr);
6056                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6057
6058                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6059
6060                 }
6061         }
6062 }
6063
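/* Top-level link setup.  Dispatches to the fiber, fiber-MII or copper
 * handler according to the PHY flags, then refreshes the MAC state that
 * depends on the result: the clock prescaler workaround on 5784 A-step
 * chips, the TX slot time (half-duplex gigabit needs the larger 0xff
 * slot time), statistics-block coalescing, and the ASPM L1 entry
 * threshold.
 */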
6064 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6065 {
6066         u32 val;
6067         int err;
6068
6069         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6070                 err = tg3_setup_fiber_phy(tp, force_reset);
6071         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6072                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6073         else
6074                 err = tg3_setup_copper_phy(tp, force_reset);
6075
6076         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6077                 u32 scale;
6078
6079                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6080                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6081                         scale = 65;
6082                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6083                         scale = 6;
6084                 else
6085                         scale = 12;
6086
6087                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6088                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6089                 tw32(GRC_MISC_CFG, val);
6090         }
6091
6092         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6093               (6 << TX_LENGTHS_IPG_SHIFT);
6094         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6095             tg3_asic_rev(tp) == ASIC_REV_5762)
6096                 val |= tr32(MAC_TX_LENGTHS) &
6097                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6098                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6099
6100         if (tp->link_config.active_speed == SPEED_1000 &&
6101             tp->link_config.active_duplex == DUPLEX_HALF)
6102                 tw32(MAC_TX_LENGTHS, val |
6103                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6104         else
6105                 tw32(MAC_TX_LENGTHS, val |
6106                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6107
6108         if (!tg3_flag(tp, 5705_PLUS)) {
6109                 if (tp->link_up) {
6110                         tw32(HOSTCC_STAT_COAL_TICKS,
6111                              tp->coal.stats_block_coalesce_usecs);
6112                 } else {
6113                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6114                 }
6115         }
6116
6117         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6118                 val = tr32(PCIE_PWR_MGMT_THRESH);
6119                 if (!tp->link_up)
6120                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6121                               tp->pwrmgmt_thresh;
6122                 else
6123                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6124                 tw32(PCIE_PWR_MGMT_THRESH, val);
6125         }
6126
6127         return err;
6128 }
6129
6130 /* tp->lock must be held */
6131 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6132 {
6133         u64 stamp;
6134
6135         ptp_read_system_prets(sts);
6136         stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6137         ptp_read_system_postts(sts);
6138         stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6139
6140         return stamp;
6141 }
6142
6143 /* tp->lock must be held */
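/* The reference clock is stopped around the two 32-bit register writes
 * so the hardware never observes a torn 64-bit value, then resumed.
 */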
6144 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6145 {
6146         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6147
6148         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6149         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6150         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6151         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6152 }
6153
6154 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6155 static inline void tg3_full_unlock(struct tg3 *tp);
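/* ethtool get_ts_info() hook (what "ethtool -T <dev>" reports).
 * Software timestamping is always advertised; hardware timestamping and
 * a PHC index are reported only when the chip is PTP-capable.
 */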
6156 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6157 {
6158         struct tg3 *tp = netdev_priv(dev);
6159
6160         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6161                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6162                                 SOF_TIMESTAMPING_SOFTWARE;
6163
6164         if (tg3_flag(tp, PTP_CAPABLE)) {
6165                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6166                                         SOF_TIMESTAMPING_RX_HARDWARE |
6167                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6168         }
6169
6170         if (tp->ptp_clock)
6171                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6172         else
6173                 info->phc_index = -1;
6174
6175         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6176
6177         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6178                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6179                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6180                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6181         return 0;
6182 }
6183
6184 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6185 {
6186         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6187         u64 correction;
6188         bool neg_adj;
6189
6190         /* Frequency adjustment is performed using hardware with a 24 bit
6191          * accumulator and a programmable correction value. On each clock
6192          * cycle the correction value is added to the accumulator, and when
6193          * it overflows the time counter is incremented or decremented.
6194          */
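        /* A worked example, assuming the generic diff_by_scaled_ppm()
         * semantics (correction = base * |scaled_ppm| / (10^6 * 2^16)):
         * a request of +1 ppm arrives as scaled_ppm = 65536, giving a
         * correction of (1 << 24) / 10^6 ~= 16.8, truncated to 16,
         * added to the 24-bit accumulator on every clock.
         */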
6195         neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6196
6197         tg3_full_lock(tp, 0);
6198
6199         if (correction)
6200                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6201                      TG3_EAV_REF_CLK_CORRECT_EN |
6202                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6203                      ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6204         else
6205                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6206
6207         tg3_full_unlock(tp);
6208
6209         return 0;
6210 }
6211
6212 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6213 {
6214         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6215
6216         tg3_full_lock(tp, 0);
6217         tp->ptp_adjust += delta;
6218         tg3_full_unlock(tp);
6219
6220         return 0;
6221 }
6222
6223 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6224                             struct ptp_system_timestamp *sts)
6225 {
6226         u64 ns;
6227         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228
6229         tg3_full_lock(tp, 0);
6230         ns = tg3_refclk_read(tp, sts);
6231         ns += tp->ptp_adjust;
6232         tg3_full_unlock(tp);
6233
6234         *ts = ns_to_timespec64(ns);
6235
6236         return 0;
6237 }
6238
6239 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6240                            const struct timespec64 *ts)
6241 {
6242         u64 ns;
6243         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6244
6245         ns = timespec64_to_ns(ts);
6246
6247         tg3_full_lock(tp, 0);
6248         tg3_refclk_write(tp, ns);
6249         tp->ptp_adjust = 0;
6250         tg3_full_unlock(tp);
6251
6252         return 0;
6253 }
6254
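/* PTP ancillary feature control.  Only periodic-output channel 0 is
 * supported, and only as a one-shot: the 64-bit start time is loaded
 * into the watchdog0 registers and the TSYNC GPIO is pointed at it.
 * A nonzero period, unsupported flags, or a start time that does not
 * fit in 63 bits are all rejected.
 */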
6255 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6256                           struct ptp_clock_request *rq, int on)
6257 {
6258         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6259         u32 clock_ctl;
6260         int rval = 0;
6261
6262         switch (rq->type) {
6263         case PTP_CLK_REQ_PEROUT:
6264                 /* Reject requests with unsupported flags */
6265                 if (rq->perout.flags)
6266                         return -EOPNOTSUPP;
6267
6268                 if (rq->perout.index != 0)
6269                         return -EINVAL;
6270
6271                 tg3_full_lock(tp, 0);
6272                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6273                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6274
6275                 if (on) {
6276                         u64 nsec;
6277
6278                         nsec = rq->perout.start.sec * 1000000000ULL +
6279                                rq->perout.start.nsec;
6280
6281                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6282                                 netdev_warn(tp->dev,
6283                                             "Device supports only a one-shot timesync output; period must be 0\n");
6284                                 rval = -EINVAL;
6285                                 goto err_out;
6286                         }
6287
6288                         if (nsec & (1ULL << 63)) {
6289                                 netdev_warn(tp->dev,
6290                                             "Start value (nsec) is over the limit; it must fit in 63 bits\n");
6291                                 rval = -EINVAL;
6292                                 goto err_out;
6293                         }
6294
6295                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6296                         tw32(TG3_EAV_WATCHDOG0_MSB,
6297                              TG3_EAV_WATCHDOG0_EN |
6298                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6299
6300                         tw32(TG3_EAV_REF_CLCK_CTL,
6301                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6302                 } else {
6303                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6304                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6305                 }
6306
6307 err_out:
6308                 tg3_full_unlock(tp);
6309                 return rval;
6310
6311         default:
6312                 break;
6313         }
6314
6315         return -EOPNOTSUPP;
6316 }
6317
6318 static const struct ptp_clock_info tg3_ptp_caps = {
6319         .owner          = THIS_MODULE,
6320         .name           = "tg3 clock",
6321         .max_adj        = 250000000,
6322         .n_alarm        = 0,
6323         .n_ext_ts       = 0,
6324         .n_per_out      = 1,
6325         .n_pins         = 0,
6326         .pps            = 0,
6327         .adjfine        = tg3_ptp_adjfine,
6328         .adjtime        = tg3_ptp_adjtime,
6329         .gettimex64     = tg3_ptp_gettimex,
6330         .settime64      = tg3_ptp_settime,
6331         .enable         = tg3_ptp_enable,
6332 };
6333
6334 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6335                                      struct skb_shared_hwtstamps *timestamp)
6336 {
6337         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6338         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6339                                            tp->ptp_adjust);
6340 }
6341
6342 /* tp->lock must be held */
6343 static void tg3_ptp_init(struct tg3 *tp)
6344 {
6345         if (!tg3_flag(tp, PTP_CAPABLE))
6346                 return;
6347
6348         /* Initialize the hardware clock to the system time. */
6349         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6350         tp->ptp_adjust = 0;
6351         tp->ptp_info = tg3_ptp_caps;
6352 }
6353
6354 /* tp->lock must be held */
6355 static void tg3_ptp_resume(struct tg3 *tp)
6356 {
6357         if (!tg3_flag(tp, PTP_CAPABLE))
6358                 return;
6359
6360         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6361         tp->ptp_adjust = 0;
6362 }
6363
6364 static void tg3_ptp_fini(struct tg3 *tp)
6365 {
6366         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6367                 return;
6368
6369         ptp_clock_unregister(tp->ptp_clock);
6370         tp->ptp_clock = NULL;
6371         tp->ptp_adjust = 0;
6372 }
6373
6374 static inline int tg3_irq_sync(struct tg3 *tp)
6375 {
6376         return tp->irq_sync;
6377 }
6378
6379 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6380 {
6381         int i;
6382
6383         dst = (u32 *)((u8 *)dst + off);
6384         for (i = 0; i < len; i += sizeof(u32))
6385                 *dst++ = tr32(off + i);
6386 }
6387
6388 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6389 {
6390         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6391         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6392         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6393         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6394         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6395         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6396         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6397         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6398         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6399         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6400         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6401         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6402         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6403         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6404         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6405         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6406         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6407         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6408         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6409
6410         if (tg3_flag(tp, SUPPORT_MSIX))
6411                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6412
6413         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6414         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6415         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6416         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6417         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6418         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6419         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6420         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6421
6422         if (!tg3_flag(tp, 5705_PLUS)) {
6423                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6424                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6425                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6426         }
6427
6428         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6429         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6430         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6431         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6432         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6433
6434         if (tg3_flag(tp, NVRAM))
6435                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6436 }
6437
6438 static void tg3_dump_state(struct tg3 *tp)
6439 {
6440         int i;
6441         u32 *regs;
6442
6443         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6444         if (!regs)
6445                 return;
6446
6447         if (tg3_flag(tp, PCI_EXPRESS)) {
6448                 /* Read up to but not including private PCI registers */
6449                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6450                         regs[i / sizeof(u32)] = tr32(i);
6451         } else
6452                 tg3_dump_legacy_regs(tp, regs);
6453
6454         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6455                 if (!regs[i + 0] && !regs[i + 1] &&
6456                     !regs[i + 2] && !regs[i + 3])
6457                         continue;
6458
6459                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6460                            i * 4,
6461                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6462         }
6463
6464         kfree(regs);
6465
6466         for (i = 0; i < tp->irq_cnt; i++) {
6467                 struct tg3_napi *tnapi = &tp->napi[i];
6468
6469                 /* SW status block */
6470                 netdev_err(tp->dev,
6471                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6472                            i,
6473                            tnapi->hw_status->status,
6474                            tnapi->hw_status->status_tag,
6475                            tnapi->hw_status->rx_jumbo_consumer,
6476                            tnapi->hw_status->rx_consumer,
6477                            tnapi->hw_status->rx_mini_consumer,
6478                            tnapi->hw_status->idx[0].rx_producer,
6479                            tnapi->hw_status->idx[0].tx_consumer);
6480
6481                 netdev_err(tp->dev,
6482                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6483                            i,
6484                            tnapi->last_tag, tnapi->last_irq_tag,
6485                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6486                            tnapi->rx_rcb_ptr,
6487                            tnapi->prodring.rx_std_prod_idx,
6488                            tnapi->prodring.rx_std_cons_idx,
6489                            tnapi->prodring.rx_jmb_prod_idx,
6490                            tnapi->prodring.rx_jmb_cons_idx);
6491         }
6492 }
6493
6494 /* This is called whenever we suspect that the system chipset is re-
6495  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6496  * is bogus tx completions. We try to recover by setting the
6497  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6498  * in the workqueue.
6499  */
6500 static void tg3_tx_recover(struct tg3 *tp)
6501 {
6502         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6503                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6504
6505         netdev_warn(tp->dev,
6506                     "The system may be re-ordering memory-mapped I/O "
6507                     "cycles to the network device, attempting to recover. "
6508                     "Please report the problem to the driver maintainer "
6509                     "and include system chipset information.\n");
6510
6511         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6512 }
6513
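/* Number of free descriptors in the TX ring.  tx_prod and tx_cons are
 * free-running indices, so (tx_prod - tx_cons) masked with the ring
 * size gives the count in flight even across wraparound; e.g. for a
 * 512-entry ring, prod = 5 and cons = 510 yields (5 - 510) & 511 = 7.
 */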
6514 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6515 {
6516         /* Tell compiler to fetch tx indices from memory. */
6517         barrier();
6518         return tnapi->tx_pending -
6519                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6520 }
6521
6522 /* Tigon3 never reports partial packet sends.  So we do not
6523  * need special logic to handle SKBs that have not had all
6524  * of their frags sent yet, like SunGEM does.
6525  */
6526 static void tg3_tx(struct tg3_napi *tnapi)
6527 {
6528         struct tg3 *tp = tnapi->tp;
6529         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6530         u32 sw_idx = tnapi->tx_cons;
6531         struct netdev_queue *txq;
6532         int index = tnapi - tp->napi;
6533         unsigned int pkts_compl = 0, bytes_compl = 0;
6534
6535         if (tg3_flag(tp, ENABLE_TSS))
6536                 index--;
6537
6538         txq = netdev_get_tx_queue(tp->dev, index);
6539
6540         while (sw_idx != hw_idx) {
6541                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6542                 struct sk_buff *skb = ri->skb;
6543                 int i, tx_bug = 0;
6544
6545                 if (unlikely(skb == NULL)) {
6546                         tg3_tx_recover(tp);
6547                         return;
6548                 }
6549
6550                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6551                         struct skb_shared_hwtstamps timestamp;
6552                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6553                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6554
6555                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6556
6557                         skb_tstamp_tx(skb, &timestamp);
6558                 }
6559
6560                 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6561                                  skb_headlen(skb), DMA_TO_DEVICE);
6562
6563                 ri->skb = NULL;
6564
6565                 while (ri->fragmented) {
6566                         ri->fragmented = false;
6567                         sw_idx = NEXT_TX(sw_idx);
6568                         ri = &tnapi->tx_buffers[sw_idx];
6569                 }
6570
6571                 sw_idx = NEXT_TX(sw_idx);
6572
6573                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6574                         ri = &tnapi->tx_buffers[sw_idx];
6575                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6576                                 tx_bug = 1;
6577
6578                         dma_unmap_page(&tp->pdev->dev,
6579                                        dma_unmap_addr(ri, mapping),
6580                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6581                                        DMA_TO_DEVICE);
6582
6583                         while (ri->fragmented) {
6584                                 ri->fragmented = false;
6585                                 sw_idx = NEXT_TX(sw_idx);
6586                                 ri = &tnapi->tx_buffers[sw_idx];
6587                         }
6588
6589                         sw_idx = NEXT_TX(sw_idx);
6590                 }
6591
6592                 pkts_compl++;
6593                 bytes_compl += skb->len;
6594
6595                 dev_consume_skb_any(skb);
6596
6597                 if (unlikely(tx_bug)) {
6598                         tg3_tx_recover(tp);
6599                         return;
6600                 }
6601         }
6602
6603         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6604
6605         tnapi->tx_cons = sw_idx;
6606
6607         /* Need to make the tx_cons update visible to tg3_start_xmit()
6608          * before checking for netif_queue_stopped().  Without the
6609          * memory barrier, there is a small possibility that tg3_start_xmit()
6610          * will miss it and cause the queue to be stopped forever.
6611          */
6612         smp_mb();
6613
6614         if (unlikely(netif_tx_queue_stopped(txq) &&
6615                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6616                 __netif_tx_lock(txq, smp_processor_id());
6617                 if (netif_tx_queue_stopped(txq) &&
6618                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6619                         netif_tx_wake_queue(txq);
6620                 __netif_tx_unlock(txq);
6621         }
6622 }
6623
6624 static void tg3_frag_free(bool is_frag, void *data)
6625 {
6626         if (is_frag)
6627                 skb_free_frag(data);
6628         else
6629                 kfree(data);
6630 }
6631
6632 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6633 {
6634         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6635                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6636
6637         if (!ri->data)
6638                 return;
6639
6640         dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6641                          DMA_FROM_DEVICE);
6642         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6643         ri->data = NULL;
6644 }
6645
6647 /* Returns size of skb allocated or < 0 on error.
6648  *
6649  * We only need to fill in the address because the other members
6650  * of the RX descriptor are invariant, see tg3_init_rings.
6651  *
6652  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6653  * posting buffers we only dirty the first cache line of the RX
6654  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6655  * buffers the cpu only reads the last cacheline of the RX descriptor
6656  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6657  */
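/* Buffer memory comes from the per-CPU NAPI page-fragment allocator
 * when the aligned buffer plus struct skb_shared_info fits in a page;
 * anything larger (e.g. jumbo buffers) falls back to kmalloc().  The
 * choice is recorded through *frag_size so that build_skb() and the
 * eventual free use the matching allocator.
 */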
6658 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6659                              u32 opaque_key, u32 dest_idx_unmasked,
6660                              unsigned int *frag_size)
6661 {
6662         struct tg3_rx_buffer_desc *desc;
6663         struct ring_info *map;
6664         u8 *data;
6665         dma_addr_t mapping;
6666         int skb_size, data_size, dest_idx;
6667
6668         switch (opaque_key) {
6669         case RXD_OPAQUE_RING_STD:
6670                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6671                 desc = &tpr->rx_std[dest_idx];
6672                 map = &tpr->rx_std_buffers[dest_idx];
6673                 data_size = tp->rx_pkt_map_sz;
6674                 break;
6675
6676         case RXD_OPAQUE_RING_JUMBO:
6677                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6678                 desc = &tpr->rx_jmb[dest_idx].std;
6679                 map = &tpr->rx_jmb_buffers[dest_idx];
6680                 data_size = TG3_RX_JMB_MAP_SZ;
6681                 break;
6682
6683         default:
6684                 return -EINVAL;
6685         }
6686
6687         /* Do not overwrite any of the map or rp information
6688          * until we are sure we can commit to a new buffer.
6689          *
6690          * Callers depend upon this behavior and assume that
6691          * we leave everything unchanged if we fail.
6692          */
6693         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6694                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6695         if (skb_size <= PAGE_SIZE) {
6696                 data = napi_alloc_frag(skb_size);
6697                 *frag_size = skb_size;
6698         } else {
6699                 data = kmalloc(skb_size, GFP_ATOMIC);
6700                 *frag_size = 0;
6701         }
6702         if (!data)
6703                 return -ENOMEM;
6704
6705         mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6706                                  data_size, DMA_FROM_DEVICE);
6707         if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6708                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6709                 return -EIO;
6710         }
6711
6712         map->data = data;
6713         dma_unmap_addr_set(map, mapping, mapping);
6714
6715         desc->addr_hi = ((u64)mapping >> 32);
6716         desc->addr_lo = ((u64)mapping & 0xffffffff);
6717
6718         return data_size;
6719 }
6720
6721 /* We only need to move over in the address because the other
6722  * members of the RX descriptor are invariant.  See notes above
6723  * tg3_alloc_rx_data for full details.
6724  */
6725 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6726                            struct tg3_rx_prodring_set *dpr,
6727                            u32 opaque_key, int src_idx,
6728                            u32 dest_idx_unmasked)
6729 {
6730         struct tg3 *tp = tnapi->tp;
6731         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6732         struct ring_info *src_map, *dest_map;
6733         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6734         int dest_idx;
6735
6736         switch (opaque_key) {
6737         case RXD_OPAQUE_RING_STD:
6738                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6739                 dest_desc = &dpr->rx_std[dest_idx];
6740                 dest_map = &dpr->rx_std_buffers[dest_idx];
6741                 src_desc = &spr->rx_std[src_idx];
6742                 src_map = &spr->rx_std_buffers[src_idx];
6743                 break;
6744
6745         case RXD_OPAQUE_RING_JUMBO:
6746                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6747                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6748                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6749                 src_desc = &spr->rx_jmb[src_idx].std;
6750                 src_map = &spr->rx_jmb_buffers[src_idx];
6751                 break;
6752
6753         default:
6754                 return;
6755         }
6756
6757         dest_map->data = src_map->data;
6758         dma_unmap_addr_set(dest_map, mapping,
6759                            dma_unmap_addr(src_map, mapping));
6760         dest_desc->addr_hi = src_desc->addr_hi;
6761         dest_desc->addr_lo = src_desc->addr_lo;
6762
6763         /* Ensure that the update to the skb happens after the physical
6764          * addresses have been transferred to the new BD location.
6765          */
6766         smp_wmb();
6767
6768         src_map->data = NULL;
6769 }
6770
6771 /* The RX ring scheme is composed of multiple rings which post fresh
6772  * buffers to the chip, and one special ring the chip uses to report
6773  * status back to the host.
6774  *
6775  * The special ring reports the status of received packets to the
6776  * host.  The chip does not write into the original descriptor the
6777  * RX buffer was obtained from.  The chip simply takes the original
6778  * descriptor as provided by the host, updates the status and length
6779  * field, then writes this into the next status ring entry.
6780  *
6781  * Each ring the host uses to post buffers to the chip is described
6782  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6783  * it is first placed into the on-chip ram.  When the packet's length
6784  * is known, it walks down the TG3_BDINFO entries to select the ring.
6785  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6786  * which is within the range of the new packet's length is chosen.
6787  *
6788  * The "separate ring for rx status" scheme may sound odd, but it makes
6789  * sense from a cache coherency perspective.  If only the host writes
6790  * to the buffer post rings, and only the chip writes to the rx status
6791  * rings, then cache lines never move beyond shared-modified state.
6792  * If both the host and chip were to write into the same ring, cache line
6793  * eviction could occur since both entities want it in an exclusive state.
6794  */
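/* In short:
 *
 *   host: posts empty buffers to the std/jumbo producer rings
 *   chip: DMAs the packet, then writes a completed copy of the
 *         descriptor (opaque cookie, length, status) to the return ring
 *   host: walks the return ring in tg3_rx() below and reposts buffers
 */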
6795 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6796 {
6797         struct tg3 *tp = tnapi->tp;
6798         u32 work_mask, rx_std_posted = 0;
6799         u32 std_prod_idx, jmb_prod_idx;
6800         u32 sw_idx = tnapi->rx_rcb_ptr;
6801         u16 hw_idx;
6802         int received;
6803         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6804
6805         hw_idx = *(tnapi->rx_rcb_prod_idx);
6806         /*
6807          * We need to order the read of hw_idx and the read of
6808          * the opaque cookie.
6809          */
6810         rmb();
6811         work_mask = 0;
6812         received = 0;
6813         std_prod_idx = tpr->rx_std_prod_idx;
6814         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6815         while (sw_idx != hw_idx && budget > 0) {
6816                 struct ring_info *ri;
6817                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6818                 unsigned int len;
6819                 struct sk_buff *skb;
6820                 dma_addr_t dma_addr;
6821                 u32 opaque_key, desc_idx, *post_ptr;
6822                 u8 *data;
6823                 u64 tstamp = 0;
6824
6825                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6826                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6827                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6828                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6829                         dma_addr = dma_unmap_addr(ri, mapping);
6830                         data = ri->data;
6831                         post_ptr = &std_prod_idx;
6832                         rx_std_posted++;
6833                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6834                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6835                         dma_addr = dma_unmap_addr(ri, mapping);
6836                         data = ri->data;
6837                         post_ptr = &jmb_prod_idx;
6838                 } else
6839                         goto next_pkt_nopost;
6840
6841                 work_mask |= opaque_key;
6842
6843                 if (desc->err_vlan & RXD_ERR_MASK) {
6844                 drop_it:
6845                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6846                                        desc_idx, *post_ptr);
6847                 drop_it_no_recycle:
6848                         /* Other statistics are tracked by the card. */
6849                         tp->rx_dropped++;
6850                         goto next_pkt;
6851                 }
6852
6853                 prefetch(data + TG3_RX_OFFSET(tp));
6854                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6855                       ETH_FCS_LEN;
6856
6857                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6858                      RXD_FLAG_PTPSTAT_PTPV1 ||
6859                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6860                      RXD_FLAG_PTPSTAT_PTPV2) {
6861                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6862                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6863                 }
6864
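                /* Copybreak: packets above the threshold hand the DMA
                 * buffer straight to the stack via build_skb() while a
                 * fresh buffer is posted; smaller packets are copied
                 * into a new skb so the original buffer can simply be
                 * recycled back onto the producer ring.
                 */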
6865                 if (len > TG3_RX_COPY_THRESH(tp)) {
6866                         int skb_size;
6867                         unsigned int frag_size;
6868
6869                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6870                                                     *post_ptr, &frag_size);
6871                         if (skb_size < 0)
6872                                 goto drop_it;
6873
6874                         dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6875                                          DMA_FROM_DEVICE);
6876
6877                         /* Ensure that the update to the data happens
6878                          * after the usage of the old DMA mapping.
6879                          */
6880                         smp_wmb();
6881
6882                         ri->data = NULL;
6883
6884                         skb = build_skb(data, frag_size);
6885                         if (!skb) {
6886                                 tg3_frag_free(frag_size != 0, data);
6887                                 goto drop_it_no_recycle;
6888                         }
6889                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6890                 } else {
6891                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6892                                        desc_idx, *post_ptr);
6893
6894                         skb = netdev_alloc_skb(tp->dev,
6895                                                len + TG3_RAW_IP_ALIGN);
6896                         if (skb == NULL)
6897                                 goto drop_it_no_recycle;
6898
6899                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6900                         dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6901                                                 DMA_FROM_DEVICE);
6902                         memcpy(skb->data,
6903                                data + TG3_RX_OFFSET(tp),
6904                                len);
6905                         dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6906                                                    len, DMA_FROM_DEVICE);
6907                 }
6908
6909                 skb_put(skb, len);
6910                 if (tstamp)
6911                         tg3_hwclock_to_timestamp(tp, tstamp,
6912                                                  skb_hwtstamps(skb));
6913
6914                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6915                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6916                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6917                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6918                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6919                 else
6920                         skb_checksum_none_assert(skb);
6921
6922                 skb->protocol = eth_type_trans(skb, tp->dev);
6923
6924                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6925                     skb->protocol != htons(ETH_P_8021Q) &&
6926                     skb->protocol != htons(ETH_P_8021AD)) {
6927                         dev_kfree_skb_any(skb);
6928                         goto drop_it_no_recycle;
6929                 }
6930
6931                 if (desc->type_flags & RXD_FLAG_VLAN &&
6932                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6933                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6934                                                desc->err_vlan & RXD_VLAN_MASK);
6935
6936                 napi_gro_receive(&tnapi->napi, skb);
6937
6938                 received++;
6939                 budget--;
6940
6941 next_pkt:
6942                 (*post_ptr)++;
6943
6944                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6945                         tpr->rx_std_prod_idx = std_prod_idx &
6946                                                tp->rx_std_ring_mask;
6947                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6948                                      tpr->rx_std_prod_idx);
6949                         work_mask &= ~RXD_OPAQUE_RING_STD;
6950                         rx_std_posted = 0;
6951                 }
6952 next_pkt_nopost:
6953                 sw_idx++;
6954                 sw_idx &= tp->rx_ret_ring_mask;
6955
6956                 /* Refresh hw_idx to see if there is new work */
6957                 if (sw_idx == hw_idx) {
6958                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6959                         rmb();
6960                 }
6961         }
6962
6963         /* ACK the status ring. */
6964         tnapi->rx_rcb_ptr = sw_idx;
6965         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6966
6967         /* Refill RX ring(s). */
6968         if (!tg3_flag(tp, ENABLE_RSS)) {
6969                 /* Sync BD data before updating mailbox */
6970                 wmb();
6971
6972                 if (work_mask & RXD_OPAQUE_RING_STD) {
6973                         tpr->rx_std_prod_idx = std_prod_idx &
6974                                                tp->rx_std_ring_mask;
6975                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6976                                      tpr->rx_std_prod_idx);
6977                 }
6978                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6979                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6980                                                tp->rx_jmb_ring_mask;
6981                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6982                                      tpr->rx_jmb_prod_idx);
6983                 }
6984         } else if (work_mask) {
6985                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6986                  * updated before the producer indices can be updated.
6987                  */
6988                 smp_wmb();
6989
6990                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6991                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6992
6993                 if (tnapi != &tp->napi[1]) {
6994                         tp->rx_refill = true;
6995                         napi_schedule(&tp->napi[1].napi);
6996                 }
6997         }
6998
6999         return received;
7000 }
7001
7002 static void tg3_poll_link(struct tg3 *tp)
7003 {
7004         /* handle link change and other phy events */
7005         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7006                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7007
7008                 if (sblk->status & SD_STATUS_LINK_CHG) {
7009                         sblk->status = SD_STATUS_UPDATED |
7010                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7011                         spin_lock(&tp->lock);
7012                         if (tg3_flag(tp, USE_PHYLIB)) {
7013                                 tw32_f(MAC_STATUS,
7014                                      (MAC_STATUS_SYNC_CHANGED |
7015                                       MAC_STATUS_CFG_CHANGED |
7016                                       MAC_STATUS_MI_COMPLETION |
7017                                       MAC_STATUS_LNKSTATE_CHANGED));
7018                                 udelay(40);
7019                         } else
7020                                 tg3_setup_phy(tp, false);
7021                         spin_unlock(&tp->lock);
7022                 }
7023         }
7024 }
7025
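/* Transfer posted buffers from a source producer ring (spr) to the
 * destination producer ring (dpr) that the hardware refills from, for
 * the standard and jumbo rings in turn.  Entries are moved in runs,
 * with ring wraparound handled explicitly; if an occupied destination
 * slot is hit the run is cut short and -ENOSPC returned so the caller
 * can try again later.
 */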
7026 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7027                                 struct tg3_rx_prodring_set *dpr,
7028                                 struct tg3_rx_prodring_set *spr)
7029 {
7030         u32 si, di, cpycnt, src_prod_idx;
7031         int i, err = 0;
7032
7033         while (1) {
7034                 src_prod_idx = spr->rx_std_prod_idx;
7035
7036                 /* Make sure updates to the rx_std_buffers[] entries and the
7037                  * standard producer index are seen in the correct order.
7038                  */
7039                 smp_rmb();
7040
7041                 if (spr->rx_std_cons_idx == src_prod_idx)
7042                         break;
7043
7044                 if (spr->rx_std_cons_idx < src_prod_idx)
7045                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7046                 else
7047                         cpycnt = tp->rx_std_ring_mask + 1 -
7048                                  spr->rx_std_cons_idx;
7049
7050                 cpycnt = min(cpycnt,
7051                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7052
7053                 si = spr->rx_std_cons_idx;
7054                 di = dpr->rx_std_prod_idx;
7055
7056                 for (i = di; i < di + cpycnt; i++) {
7057                         if (dpr->rx_std_buffers[i].data) {
7058                                 cpycnt = i - di;
7059                                 err = -ENOSPC;
7060                                 break;
7061                         }
7062                 }
7063
7064                 if (!cpycnt)
7065                         break;
7066
7067                 /* Ensure that updates to the rx_std_buffers ring and the
7068                  * shadowed hardware producer ring from tg3_recycle_rx() are
7069                  * ordered correctly WRT the skb check above.
7070                  */
7071                 smp_rmb();
7072
7073                 memcpy(&dpr->rx_std_buffers[di],
7074                        &spr->rx_std_buffers[si],
7075                        cpycnt * sizeof(struct ring_info));
7076
7077                 for (i = 0; i < cpycnt; i++, di++, si++) {
7078                         struct tg3_rx_buffer_desc *sbd, *dbd;
7079                         sbd = &spr->rx_std[si];
7080                         dbd = &dpr->rx_std[di];
7081                         dbd->addr_hi = sbd->addr_hi;
7082                         dbd->addr_lo = sbd->addr_lo;
7083                 }
7084
7085                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7086                                        tp->rx_std_ring_mask;
7087                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7088                                        tp->rx_std_ring_mask;
7089         }
7090
7091         while (1) {
7092                 src_prod_idx = spr->rx_jmb_prod_idx;
7093
7094                 /* Make sure updates to the rx_jmb_buffers[] entries and
7095                  * the jumbo producer index are seen in the correct order.
7096                  */
7097                 smp_rmb();
7098
7099                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7100                         break;
7101
7102                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7103                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7104                 else
7105                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7106                                  spr->rx_jmb_cons_idx;
7107
7108                 cpycnt = min(cpycnt,
7109                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7110
7111                 si = spr->rx_jmb_cons_idx;
7112                 di = dpr->rx_jmb_prod_idx;
7113
7114                 for (i = di; i < di + cpycnt; i++) {
7115                         if (dpr->rx_jmb_buffers[i].data) {
7116                                 cpycnt = i - di;
7117                                 err = -ENOSPC;
7118                                 break;
7119                         }
7120                 }
7121
7122                 if (!cpycnt)
7123                         break;
7124
7125                 /* Ensure that updates to the rx_jmb_buffers ring and the
7126                  * shadowed hardware producer ring from tg3_recycle_skb() are
7127                  * ordered correctly WRT the skb check above.
7128                  */
7129                 smp_rmb();
7130
7131                 memcpy(&dpr->rx_jmb_buffers[di],
7132                        &spr->rx_jmb_buffers[si],
7133                        cpycnt * sizeof(struct ring_info));
7134
7135                 for (i = 0; i < cpycnt; i++, di++, si++) {
7136                         struct tg3_rx_buffer_desc *sbd, *dbd;
7137                         sbd = &spr->rx_jmb[si].std;
7138                         dbd = &dpr->rx_jmb[di].std;
7139                         dbd->addr_hi = sbd->addr_hi;
7140                         dbd->addr_lo = sbd->addr_lo;
7141                 }
7142
7143                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7144                                        tp->rx_jmb_ring_mask;
7145                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7146                                        tp->rx_jmb_ring_mask;
7147         }
7148
7149         return err;
7150 }
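
/* Worked example of the index arithmetic above (ring size assumed):
 * with a 512-entry standard ring (rx_std_ring_mask = 511), cons_idx =
 * 508 and src_prod_idx = 4, cons > prod, so the first pass copies
 * 512 - 508 = 4 entries up to the end of the ring and
 * (508 + 4) & 511 wraps cons_idx to 0; the next loop iteration then
 * copies entries 0..3.
 */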
7151
7152 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7153 {
7154         struct tg3 *tp = tnapi->tp;
7155
7156         /* run TX completion thread */
7157         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7158                 tg3_tx(tnapi);
7159                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7160                         return work_done;
7161         }
7162
7163         if (!tnapi->rx_rcb_prod_idx)
7164                 return work_done;
7165
7166         /* run RX thread, within the bounds set by NAPI.
7167          * All RX "locking" is done by ensuring outside
7168          * code synchronizes with tg3->napi.poll()
7169          */
7170         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7171                 work_done += tg3_rx(tnapi, budget - work_done);
7172
7173         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7174                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7175                 int i, err = 0;
7176                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7177                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7178
7179                 tp->rx_refill = false;
7180                 for (i = 1; i <= tp->rxq_cnt; i++)
7181                         err |= tg3_rx_prodring_xfer(tp, dpr,
7182                                                     &tp->napi[i].prodring);
7183
7184                 wmb();
7185
7186                 if (std_prod_idx != dpr->rx_std_prod_idx)
7187                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7188                                      dpr->rx_std_prod_idx);
7189
7190                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7191                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7192                                      dpr->rx_jmb_prod_idx);
7193
7194                 if (err)
7195                         tw32_f(HOSTCC_MODE, tp->coal_now);
7196         }
7197
7198         return work_done;
7199 }
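
/* RSS note: only tp->napi[1] performs the transfer above.  Each RX
 * queue refills buffers into its own per-queue prodring; napi[1]
 * drains those staging rings back into napi[0]'s prodring, the only
 * one the hardware consumes from, and then updates the producer
 * mailboxes if anything moved.
 */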
7200
7201 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7202 {
7203         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7204                 schedule_work(&tp->reset_task);
7205 }
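
/* test_and_set_bit() is atomic, so only the caller that flips
 * RESET_TASK_PENDING from 0 to 1 schedules the work; concurrent
 * callers see the bit already set and return without queuing a
 * second reset.  tg3_reset_task_cancel() below is the clearing side.
 */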
7206
7207 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7208 {
7209         if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7210                 cancel_work_sync(&tp->reset_task);
7211         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7212 }
7213
7214 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7215 {
7216         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7217         struct tg3 *tp = tnapi->tp;
7218         int work_done = 0;
7219         struct tg3_hw_status *sblk = tnapi->hw_status;
7220
7221         while (1) {
7222                 work_done = tg3_poll_work(tnapi, work_done, budget);
7223
7224                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7225                         goto tx_recovery;
7226
7227                 if (unlikely(work_done >= budget))
7228                         break;
7229
7230                 /* tnapi->last_tag is written to the interrupt mailbox
7231                  * below to tell the hw how much work has been processed,
7232                  * so we must read it before checking for more work.
7233                  */
7234                 tnapi->last_tag = sblk->status_tag;
7235                 tnapi->last_irq_tag = tnapi->last_tag;
7236                 rmb();
7237
7238                 /* check for RX/TX work to do */
7239                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7240                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7241
7242                         /* This test is not race-free, but looping again
7243                          * reduces the number of interrupts taken.
7244                          */
7245                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7246                                 continue;
7247
7248                         napi_complete_done(napi, work_done);
7249                         /* Reenable interrupts. */
7250                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7251
7252                         /* This test is synchronized by napi_schedule()
7253                          * and napi_complete() to close the race window.
7254                          */
7255                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7256                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7257                                                   HOSTCC_MODE_ENABLE |
7258                                                   tnapi->coal_now);
7259                         }
7260                         break;
7261                 }
7262         }
7263
7264         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7265         return work_done;
7266
7267 tx_recovery:
7268         /* work_done is guaranteed to be less than budget. */
7269         napi_complete(napi);
7270         tg3_reset_task_schedule(tp);
7271         return work_done;
7272 }
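
/* Illustrative note on the re-enable above: the tag lands in the top
 * byte of the interrupt mailbox, e.g. last_tag = 0x17 produces a
 * write of 0x17000000.  If the chip has since posted a status block
 * with a newer tag, it raises the interrupt again, so work arriving
 * around napi_complete_done() is not lost.
 */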
7273
7274 static void tg3_process_error(struct tg3 *tp)
7275 {
7276         u32 val;
7277         bool real_error = false;
7278
7279         if (tg3_flag(tp, ERROR_PROCESSED))
7280                 return;
7281
7282         /* Check Flow Attention register */
7283         val = tr32(HOSTCC_FLOW_ATTN);
7284         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7285                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7286                 real_error = true;
7287         }
7288
7289         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7290                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7291                 real_error = true;
7292         }
7293
7294         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7295                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7296                 real_error = true;
7297         }
7298
7299         if (!real_error)
7300                 return;
7301
7302         tg3_dump_state(tp);
7303
7304         tg3_flag_set(tp, ERROR_PROCESSED);
7305         tg3_reset_task_schedule(tp);
7306 }
7307
7308 static int tg3_poll(struct napi_struct *napi, int budget)
7309 {
7310         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7311         struct tg3 *tp = tnapi->tp;
7312         int work_done = 0;
7313         struct tg3_hw_status *sblk = tnapi->hw_status;
7314
7315         while (1) {
7316                 if (sblk->status & SD_STATUS_ERROR)
7317                         tg3_process_error(tp);
7318
7319                 tg3_poll_link(tp);
7320
7321                 work_done = tg3_poll_work(tnapi, work_done, budget);
7322
7323                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7324                         goto tx_recovery;
7325
7326                 if (unlikely(work_done >= budget))
7327                         break;
7328
7329                 if (tg3_flag(tp, TAGGED_STATUS)) {
7330                         /* tnapi->last_tag is used in tg3_int_reenable() below
7331                          * to tell the hw how much work has been processed,
7332                          * so we must read it before checking for more work.
7333                          */
7334                         tnapi->last_tag = sblk->status_tag;
7335                         tnapi->last_irq_tag = tnapi->last_tag;
7336                         rmb();
7337                 } else
7338                         sblk->status &= ~SD_STATUS_UPDATED;
7339
7340                 if (likely(!tg3_has_work(tnapi))) {
7341                         napi_complete_done(napi, work_done);
7342                         tg3_int_reenable(tnapi);
7343                         break;
7344                 }
7345         }
7346
7347         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7348         return work_done;
7349
7350 tx_recovery:
7351         /* work_done is guaranteed to be less than budget. */
7352         napi_complete(napi);
7353         tg3_reset_task_schedule(tp);
7354         return work_done;
7355 }
7356
7357 static void tg3_napi_disable(struct tg3 *tp)
7358 {
7359         int i;
7360
7361         for (i = tp->irq_cnt - 1; i >= 0; i--)
7362                 napi_disable(&tp->napi[i].napi);
7363 }
7364
7365 static void tg3_napi_enable(struct tg3 *tp)
7366 {
7367         int i;
7368
7369         for (i = 0; i < tp->irq_cnt; i++)
7370                 napi_enable(&tp->napi[i].napi);
7371 }
7372
7373 static void tg3_napi_init(struct tg3 *tp)
7374 {
7375         int i;
7376
7377         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7378         for (i = 1; i < tp->irq_cnt; i++)
7379                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7380 }
7381
7382 static void tg3_napi_fini(struct tg3 *tp)
7383 {
7384         int i;
7385
7386         for (i = 0; i < tp->irq_cnt; i++)
7387                 netif_napi_del(&tp->napi[i].napi);
7388 }
7389
7390 static inline void tg3_netif_stop(struct tg3 *tp)
7391 {
7392         netif_trans_update(tp->dev);    /* prevent tx timeout */
7393         tg3_napi_disable(tp);
7394         netif_carrier_off(tp->dev);
7395         netif_tx_disable(tp->dev);
7396 }
7397
7398 /* tp->lock must be held */
7399 static inline void tg3_netif_start(struct tg3 *tp)
7400 {
7401         tg3_ptp_resume(tp);
7402
7403         /* NOTE: unconditional netif_tx_wake_all_queues is only
7404          * appropriate so long as all callers are assured to
7405          * have free tx slots (such as after tg3_init_hw)
7406          */
7407         netif_tx_wake_all_queues(tp->dev);
7408
7409         if (tp->link_up)
7410                 netif_carrier_on(tp->dev);
7411
7412         tg3_napi_enable(tp);
7413         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7414         tg3_enable_ints(tp);
7415 }
7416
7417 static void tg3_irq_quiesce(struct tg3 *tp)
7418         __releases(tp->lock)
7419         __acquires(tp->lock)
7420 {
7421         int i;
7422
7423         BUG_ON(tp->irq_sync);
7424
7425         tp->irq_sync = 1;
7426         smp_mb();
7427
7428         spin_unlock_bh(&tp->lock);
7429
7430         for (i = 0; i < tp->irq_cnt; i++)
7431                 synchronize_irq(tp->napi[i].irq_vec);
7432
7433         spin_lock_bh(&tp->lock);
7434 }
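
/* The ordering above matters: irq_sync is set (with a full barrier)
 * before the lock is dropped, so an ISR that fires afterwards sees
 * tg3_irq_sync() true and declines to schedule NAPI, while
 * synchronize_irq() waits out any handler already in flight.
 */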
7435
7436 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7437  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7438  * with as well.  Most of the time, this is not necessary except when
7439  * shutting down the device.
7440  */
7441 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7442 {
7443         spin_lock_bh(&tp->lock);
7444         if (irq_sync)
7445                 tg3_irq_quiesce(tp);
7446 }
7447
7448 static inline void tg3_full_unlock(struct tg3 *tp)
7449 {
7450         spin_unlock_bh(&tp->lock);
7451 }
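
/* Usage sketch (illustrative only):
 *
 *	tg3_full_lock(tp, 1);	// irq_sync != 0: also quiesce ISRs
 *	...halt or reprogram the chip...
 *	tg3_full_unlock(tp);
 *
 * Callers that only need to serialize against other lock holders
 * pass irq_sync == 0 and skip the quiesce.
 */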
7452
7453 /* One-shot MSI handler - the chip automatically disables the
7454  * interrupt after sending the MSI, so the driver doesn't have to.
7455  */
7456 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7457 {
7458         struct tg3_napi *tnapi = dev_id;
7459         struct tg3 *tp = tnapi->tp;
7460
7461         prefetch(tnapi->hw_status);
7462         if (tnapi->rx_rcb)
7463                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7464
7465         if (likely(!tg3_irq_sync(tp)))
7466                 napi_schedule(&tnapi->napi);
7467
7468         return IRQ_HANDLED;
7469 }
7470
7471 /* MSI ISR - No need to check for interrupt sharing and no need to
7472  * flush status block and interrupt mailbox. PCI ordering rules
7473  * guarantee that MSI will arrive after the status block.
7474  */
7475 static irqreturn_t tg3_msi(int irq, void *dev_id)
7476 {
7477         struct tg3_napi *tnapi = dev_id;
7478         struct tg3 *tp = tnapi->tp;
7479
7480         prefetch(tnapi->hw_status);
7481         if (tnapi->rx_rcb)
7482                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7483         /*
7484          * Writing any value to intr-mbox-0 clears PCI INTA# and
7485          * chip-internal interrupt pending events.
7486          * Writing non-zero to intr-mbox-0 additionally tells the
7487          * NIC to stop sending us irqs, engaging "in-intr-handler"
7488          * event coalescing.
7489          */
7490         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7491         if (likely(!tg3_irq_sync(tp)))
7492                 napi_schedule(&tnapi->napi);
7493
7494         return IRQ_RETVAL(1);
7495 }
7496
7497 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7498 {
7499         struct tg3_napi *tnapi = dev_id;
7500         struct tg3 *tp = tnapi->tp;
7501         struct tg3_hw_status *sblk = tnapi->hw_status;
7502         unsigned int handled = 1;
7503
7504         /* In INTx mode, it is possible for the interrupt to arrive at
7505          * the CPU before the status block that was posted prior to the interrupt.
7506          * Reading the PCI State register will confirm whether the
7507          * interrupt is ours and will flush the status block.
7508          */
7509         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7510                 if (tg3_flag(tp, CHIP_RESETTING) ||
7511                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7512                         handled = 0;
7513                         goto out;
7514                 }
7515         }
7516
7517         /*
7518          * Writing any value to intr-mbox-0 clears PCI INTA# and
7519          * chip-internal interrupt pending events.
7520          * Writing non-zero to intr-mbox-0 additionally tells the
7521          * NIC to stop sending us irqs, engaging "in-intr-handler"
7522          * event coalescing.
7523          *
7524          * Flush the mailbox to de-assert the IRQ immediately to prevent
7525          * spurious interrupts.  The flush impacts performance but
7526          * excessive spurious interrupts can be worse in some cases.
7527          */
7528         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7529         if (tg3_irq_sync(tp))
7530                 goto out;
7531         sblk->status &= ~SD_STATUS_UPDATED;
7532         if (likely(tg3_has_work(tnapi))) {
7533                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7534                 napi_schedule(&tnapi->napi);
7535         } else {
7536                 /* No work, shared interrupt perhaps?  Re-enable
7537                  * interrupts, and flush that PCI write
7538                  */
7539                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7540                                0x00000000);
7541         }
7542 out:
7543         return IRQ_RETVAL(handled);
7544 }
7545
7546 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7547 {
7548         struct tg3_napi *tnapi = dev_id;
7549         struct tg3 *tp = tnapi->tp;
7550         struct tg3_hw_status *sblk = tnapi->hw_status;
7551         unsigned int handled = 1;
7552
7553         /* In INTx mode, it is possible for the interrupt to arrive at
7554          * the CPU before the status block that was posted prior to the interrupt.
7555          * Reading the PCI State register will confirm whether the
7556          * interrupt is ours and will flush the status block.
7557          */
7558         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7559                 if (tg3_flag(tp, CHIP_RESETTING) ||
7560                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7561                         handled = 0;
7562                         goto out;
7563                 }
7564         }
7565
7566         /*
7567          * Writing any value to intr-mbox-0 clears PCI INTA# and
7568          * chip-internal interrupt pending events.
7569          * Writing non-zero to intr-mbox-0 additionally tells the
7570          * NIC to stop sending us irqs, engaging "in-intr-handler"
7571          * event coalescing.
7572          *
7573          * Flush the mailbox to de-assert the IRQ immediately to prevent
7574          * spurious interrupts.  The flush impacts performance but
7575          * excessive spurious interrupts can be worse in some cases.
7576          */
7577         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7578
7579         /*
7580          * In a shared interrupt configuration, sometimes other devices'
7581          * interrupts will scream.  We record the current status tag here
7582          * so that the above check can report that the screaming interrupts
7583          * are unhandled.  Eventually they will be silenced.
7584          */
7585         tnapi->last_irq_tag = sblk->status_tag;
7586
7587         if (tg3_irq_sync(tp))
7588                 goto out;
7589
7590         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7591
7592         napi_schedule(&tnapi->napi);
7593
7594 out:
7595         return IRQ_RETVAL(handled);
7596 }
7597
7598 /* ISR for interrupt test */
7599 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7600 {
7601         struct tg3_napi *tnapi = dev_id;
7602         struct tg3 *tp = tnapi->tp;
7603         struct tg3_hw_status *sblk = tnapi->hw_status;
7604
7605         if ((sblk->status & SD_STATUS_UPDATED) ||
7606             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7607                 tg3_disable_ints(tp);
7608                 return IRQ_RETVAL(1);
7609         }
7610         return IRQ_RETVAL(0);
7611 }
7612
7613 #ifdef CONFIG_NET_POLL_CONTROLLER
7614 static void tg3_poll_controller(struct net_device *dev)
7615 {
7616         int i;
7617         struct tg3 *tp = netdev_priv(dev);
7618
7619         if (tg3_irq_sync(tp))
7620                 return;
7621
7622         for (i = 0; i < tp->irq_cnt; i++)
7623                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7624 }
7625 #endif
7626
7627 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7628 {
7629         struct tg3 *tp = netdev_priv(dev);
7630
7631         if (netif_msg_tx_err(tp)) {
7632                 netdev_err(dev, "transmit timed out, resetting\n");
7633                 tg3_dump_state(tp);
7634         }
7635
7636         tg3_reset_task_schedule(tp);
7637 }
7638
7639 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7640 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7641 {
7642         u32 base = (u32) mapping & 0xffffffff;
7643
7644         return base + len + 8 < base;
7645 }
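
/* Worked example: base = 0xffffff00, len = 0x200.  Then
 * base + len + 8 = 0x100000108, which truncates to 0x108 in u32
 * arithmetic; 0x108 < base, so the test reports that the buffer
 * would cross a 4GB boundary.
 */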
7646
7647 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7648  * of any 4GB boundaries: 4G, 8G, etc
7649  */
7650 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7651                                            u32 len, u32 mss)
7652 {
7653         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7654                 u32 base = (u32) mapping & 0xffffffff;
7655
7656                 return ((base + len + (mss & 0x3fff)) < base);
7657         }
7658         return 0;
7659 }
7660
7661 /* Test for DMA addresses > 40-bit */
7662 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7663                                           int len)
7664 {
7665 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7666         if (tg3_flag(tp, 40BIT_DMA_BUG))
7667                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7668         return 0;
7669 #else
7670         return 0;
7671 #endif
7672 }
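
/* Worked example: DMA_BIT_MASK(40) = 0xffffffffff.  With mapping =
 * 0xfffffff000 and len = 0x2000, mapping + len = 0x10000001000,
 * which exceeds the mask, so the buffer would need a 41st address
 * bit and the workaround path is taken.
 */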
7673
7674 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7675                                  dma_addr_t mapping, u32 len, u32 flags,
7676                                  u32 mss, u32 vlan)
7677 {
7678         txbd->addr_hi = ((u64) mapping >> 32);
7679         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7680         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7681         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7682 }
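
/* Illustrative example of the packing above: a mapping of
 * 0x123456780 splits into addr_hi = 0x1 and addr_lo = 0x23456780;
 * len occupies the bits above TXD_LEN_SHIFT while flags are masked
 * to the low 16 bits of len_flags.
 */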
7683
7684 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7685                             dma_addr_t map, u32 len, u32 flags,
7686                             u32 mss, u32 vlan)
7687 {
7688         struct tg3 *tp = tnapi->tp;
7689         bool hwbug = false;
7690
7691         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7692                 hwbug = true;
7693
7694         if (tg3_4g_overflow_test(map, len))
7695                 hwbug = true;
7696
7697         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7698                 hwbug = true;
7699
7700         if (tg3_40bit_overflow_test(tp, map, len))
7701                 hwbug = true;
7702
7703         if (tp->dma_limit) {
7704                 u32 prvidx = *entry;
7705                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7706                 while (len > tp->dma_limit && *budget) {
7707                         u32 frag_len = tp->dma_limit;
7708                         len -= tp->dma_limit;
7709
7710                         /* Avoid the 8-byte DMA problem */
7711                         if (len <= 8) {
7712                                 len += tp->dma_limit / 2;
7713                                 frag_len = tp->dma_limit / 2;
7714                         }
7715
7716                         tnapi->tx_buffers[*entry].fragmented = true;
7717
7718                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7719                                       frag_len, tmp_flag, mss, vlan);
7720                         *budget -= 1;
7721                         prvidx = *entry;
7722                         *entry = NEXT_TX(*entry);
7723
7724                         map += frag_len;
7725                 }
7726
7727                 if (len) {
7728                         if (*budget) {
7729                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7730                                               len, flags, mss, vlan);
7731                                 *budget -= 1;
7732                                 *entry = NEXT_TX(*entry);
7733                         } else {
7734                                 hwbug = true;
7735                                 tnapi->tx_buffers[prvidx].fragmented = false;
7736                         }
7737                 }
7738         } else {
7739                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7740                               len, flags, mss, vlan);
7741                 *entry = NEXT_TX(*entry);
7742         }
7743
7744         return hwbug;
7745 }
7746
7747 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7748 {
7749         int i;
7750         struct sk_buff *skb;
7751         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7752
7753         skb = txb->skb;
7754         txb->skb = NULL;
7755
7756         dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7757                          skb_headlen(skb), DMA_TO_DEVICE);
7758
7759         while (txb->fragmented) {
7760                 txb->fragmented = false;
7761                 entry = NEXT_TX(entry);
7762                 txb = &tnapi->tx_buffers[entry];
7763         }
7764
7765         for (i = 0; i <= last; i++) {
7766                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7767
7768                 entry = NEXT_TX(entry);
7769                 txb = &tnapi->tx_buffers[entry];
7770
7771                 dma_unmap_page(&tnapi->tp->pdev->dev,
7772                                dma_unmap_addr(txb, mapping),
7773                                skb_frag_size(frag), DMA_TO_DEVICE);
7774
7775                 while (txb->fragmented) {
7776                         txb->fragmented = false;
7777                         entry = NEXT_TX(entry);
7778                         txb = &tnapi->tx_buffers[entry];
7779                 }
7780         }
7781 }
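
/* Note on the "fragmented" walks above: when tg3_tx_frag_set() splits
 * one DMA mapping across several BDs it marks the extra entries
 * fragmented, so this function clears those flags and skips past
 * them - only the head entry of each mapping is actually unmapped.
 */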
7782
7783 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7784 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7785                                        struct sk_buff **pskb,
7786                                        u32 *entry, u32 *budget,
7787                                        u32 base_flags, u32 mss, u32 vlan)
7788 {
7789         struct tg3 *tp = tnapi->tp;
7790         struct sk_buff *new_skb, *skb = *pskb;
7791         dma_addr_t new_addr = 0;
7792         int ret = 0;
7793
7794         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7795                 new_skb = skb_copy(skb, GFP_ATOMIC);
7796         else {
7797                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7798
7799                 new_skb = skb_copy_expand(skb,
7800                                           skb_headroom(skb) + more_headroom,
7801                                           skb_tailroom(skb), GFP_ATOMIC);
7802         }
7803
7804         if (!new_skb) {
7805                 ret = -1;
7806         } else {
7807                 /* New SKB is guaranteed to be linear. */
7808                 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7809                                           new_skb->len, DMA_TO_DEVICE);
7810                 /* Make sure the mapping succeeded */
7811                 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7812                         dev_kfree_skb_any(new_skb);
7813                         ret = -1;
7814                 } else {
7815                         u32 save_entry = *entry;
7816
7817                         base_flags |= TXD_FLAG_END;
7818
7819                         tnapi->tx_buffers[*entry].skb = new_skb;
7820                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7821                                            mapping, new_addr);
7822
7823                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7824                                             new_skb->len, base_flags,
7825                                             mss, vlan)) {
7826                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7827                                 dev_kfree_skb_any(new_skb);
7828                                 ret = -1;
7829                         }
7830                 }
7831         }
7832
7833         dev_consume_skb_any(skb);
7834         *pskb = new_skb;
7835         return ret;
7836 }
7837
7838 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7839 {
7840         /* Check if we will never have enough descriptors,
7841          * as gso_segs can be more than current ring size
7842          */
7843         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7844 }
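
/* Worked example (ring size assumed): with tx_pending = 512 the check
 * passes only for gso_segs < 170.  tg3_tso_bug() budgets up to three
 * descriptors per segment (frag_cnt_est = gso_segs * 3), so a packet
 * that passes here can eventually fit in the ring.
 */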
7845
7846 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7847
7848 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7849  * indicated in tg3_tx_frag_set()
7850  */
7851 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7852                        struct netdev_queue *txq, struct sk_buff *skb)
7853 {
7854         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7855         struct sk_buff *segs, *seg, *next;
7856
7857         /* Estimate the number of fragments in the worst case */
7858         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7859                 netif_tx_stop_queue(txq);
7860
7861                 /* netif_tx_stop_queue() must be done before checking
7862                  * tx index in tg3_tx_avail() below, because in
7863                  * tg3_tx(), we update tx index before checking for
7864                  * netif_tx_queue_stopped().
7865                  */
7866                 smp_mb();
7867                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7868                         return NETDEV_TX_BUSY;
7869
7870                 netif_tx_wake_queue(txq);
7871         }
7872
7873         segs = skb_gso_segment(skb, tp->dev->features &
7874                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7875         if (IS_ERR(segs) || !segs)
7876                 goto tg3_tso_bug_end;
7877
7878         skb_list_walk_safe(segs, seg, next) {
7879                 skb_mark_not_on_list(seg);
7880                 tg3_start_xmit(seg, tp->dev);
7881         }
7882
7883 tg3_tso_bug_end:
7884         dev_consume_skb_any(skb);
7885
7886         return NETDEV_TX_OK;
7887 }
7888
7889 /* hard_start_xmit for all devices */
7890 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7891 {
7892         struct tg3 *tp = netdev_priv(dev);
7893         u32 len, entry, base_flags, mss, vlan = 0;
7894         u32 budget;
7895         int i = -1, would_hit_hwbug;
7896         dma_addr_t mapping;
7897         struct tg3_napi *tnapi;
7898         struct netdev_queue *txq;
7899         unsigned int last;
7900         struct iphdr *iph = NULL;
7901         struct tcphdr *tcph = NULL;
7902         __sum16 tcp_csum = 0, ip_csum = 0;
7903         __be16 ip_tot_len = 0;
7904
7905         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7906         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7907         if (tg3_flag(tp, ENABLE_TSS))
7908                 tnapi++;
7909
7910         budget = tg3_tx_avail(tnapi);
7911
7912         /* We are running in BH disabled context with netif_tx_lock
7913          * and TX reclaim runs via tp->napi.poll inside of a software
7914          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7915          * no IRQ context deadlocks to worry about either.  Rejoice!
7916          */
7917         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7918                 if (!netif_tx_queue_stopped(txq)) {
7919                         netif_tx_stop_queue(txq);
7920
7921                         /* This is a hard error, log it. */
7922                         netdev_err(dev,
7923                                    "BUG! Tx Ring full when queue awake!\n");
7924                 }
7925                 return NETDEV_TX_BUSY;
7926         }
7927
7928         entry = tnapi->tx_prod;
7929         base_flags = 0;
7930
7931         mss = skb_shinfo(skb)->gso_size;
7932         if (mss) {
7933                 u32 tcp_opt_len, hdr_len;
7934
7935                 if (skb_cow_head(skb, 0))
7936                         goto drop;
7937
7938                 iph = ip_hdr(skb);
7939                 tcp_opt_len = tcp_optlen(skb);
7940
7941                 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7942
7943                 /* HW/FW cannot correctly segment packets that have been
7944                  * vlan encapsulated.
7945                  */
7946                 if (skb->protocol == htons(ETH_P_8021Q) ||
7947                     skb->protocol == htons(ETH_P_8021AD)) {
7948                         if (tg3_tso_bug_gso_check(tnapi, skb))
7949                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7950                         goto drop;
7951                 }
7952
7953                 if (!skb_is_gso_v6(skb)) {
7954                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7955                             tg3_flag(tp, TSO_BUG)) {
7956                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7957                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7958                                 goto drop;
7959                         }
7960                         ip_csum = iph->check;
7961                         ip_tot_len = iph->tot_len;
7962                         iph->check = 0;
7963                         iph->tot_len = htons(mss + hdr_len);
7964                 }
7965
7966                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7967                                TXD_FLAG_CPU_POST_DMA);
7968
7969                 tcph = tcp_hdr(skb);
7970                 tcp_csum = tcph->check;
7971
7972                 if (tg3_flag(tp, HW_TSO_1) ||
7973                     tg3_flag(tp, HW_TSO_2) ||
7974                     tg3_flag(tp, HW_TSO_3)) {
7975                         tcph->check = 0;
7976                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7977                 } else {
7978                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7979                                                          0, IPPROTO_TCP, 0);
7980                 }
7981
7982                 if (tg3_flag(tp, HW_TSO_3)) {
7983                         mss |= (hdr_len & 0xc) << 12;
7984                         if (hdr_len & 0x10)
7985                                 base_flags |= 0x00000010;
7986                         base_flags |= (hdr_len & 0x3e0) << 5;
7987                 } else if (tg3_flag(tp, HW_TSO_2))
7988                         mss |= hdr_len << 9;
7989                 else if (tg3_flag(tp, HW_TSO_1) ||
7990                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7991                         if (tcp_opt_len || iph->ihl > 5) {
7992                                 int tsflags;
7993
7994                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7995                                 mss |= (tsflags << 11);
7996                         }
7997                 } else {
7998                         if (tcp_opt_len || iph->ihl > 5) {
7999                                 int tsflags;
8000
8001                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8002                                 base_flags |= tsflags << 12;
8003                         }
8004                 }
8005         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8006                 /* HW/FW cannot correctly checksum packets that have been
8007                  * vlan encapsulated.
8008                  */
8009                 if (skb->protocol == htons(ETH_P_8021Q) ||
8010                     skb->protocol == htons(ETH_P_8021AD)) {
8011                         if (skb_checksum_help(skb))
8012                                 goto drop;
8013                 } else  {
8014                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8015                 }
8016         }
8017
8018         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8019             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8020                 base_flags |= TXD_FLAG_JMB_PKT;
8021
8022         if (skb_vlan_tag_present(skb)) {
8023                 base_flags |= TXD_FLAG_VLAN;
8024                 vlan = skb_vlan_tag_get(skb);
8025         }
8026
8027         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8028             tg3_flag(tp, TX_TSTAMP_EN)) {
8029                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8030                 base_flags |= TXD_FLAG_HWTSTAMP;
8031         }
8032
8033         len = skb_headlen(skb);
8034
8035         mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8036                                  DMA_TO_DEVICE);
8037         if (dma_mapping_error(&tp->pdev->dev, mapping))
8038                 goto drop;
8039
8040
8041         tnapi->tx_buffers[entry].skb = skb;
8042         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8043
8044         would_hit_hwbug = 0;
8045
8046         if (tg3_flag(tp, 5701_DMA_BUG))
8047                 would_hit_hwbug = 1;
8048
8049         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8050                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8051                             mss, vlan)) {
8052                 would_hit_hwbug = 1;
8053         } else if (skb_shinfo(skb)->nr_frags > 0) {
8054                 u32 tmp_mss = mss;
8055
8056                 if (!tg3_flag(tp, HW_TSO_1) &&
8057                     !tg3_flag(tp, HW_TSO_2) &&
8058                     !tg3_flag(tp, HW_TSO_3))
8059                         tmp_mss = 0;
8060
8061                 /* Now loop through additional data
8062                  * fragments, and queue them.
8063                  */
8064                 last = skb_shinfo(skb)->nr_frags - 1;
8065                 for (i = 0; i <= last; i++) {
8066                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8067
8068                         len = skb_frag_size(frag);
8069                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8070                                                    len, DMA_TO_DEVICE);
8071
8072                         tnapi->tx_buffers[entry].skb = NULL;
8073                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8074                                            mapping);
8075                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8076                                 goto dma_error;
8077
8078                         if (!budget ||
8079                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8080                                             len, base_flags |
8081                                             ((i == last) ? TXD_FLAG_END : 0),
8082                                             tmp_mss, vlan)) {
8083                                 would_hit_hwbug = 1;
8084                                 break;
8085                         }
8086                 }
8087         }
8088
8089         if (would_hit_hwbug) {
8090                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8091
8092                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8093                         /* If it's a TSO packet, do GSO instead of
8094                          * allocating and copying to a large linear SKB
8095                          */
8096                         if (ip_tot_len) {
8097                                 iph->check = ip_csum;
8098                                 iph->tot_len = ip_tot_len;
8099                         }
8100                         tcph->check = tcp_csum;
8101                         return tg3_tso_bug(tp, tnapi, txq, skb);
8102                 }
8103
8104                 /* If the workaround fails due to memory/mapping
8105                  * failure, silently drop this packet.
8106                  */
8107                 entry = tnapi->tx_prod;
8108                 budget = tg3_tx_avail(tnapi);
8109                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8110                                                 base_flags, mss, vlan))
8111                         goto drop_nofree;
8112         }
8113
8114         skb_tx_timestamp(skb);
8115         netdev_tx_sent_queue(txq, skb->len);
8116
8117         /* Sync BD data before updating mailbox */
8118         wmb();
8119
8120         tnapi->tx_prod = entry;
8121         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8122                 netif_tx_stop_queue(txq);
8123
8124                 /* netif_tx_stop_queue() must be done before checking
8125                  * tx index in tg3_tx_avail() below, because in
8126                  * tg3_tx(), we update tx index before checking for
8127                  * netif_tx_queue_stopped().
8128                  */
8129                 smp_mb();
8130                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8131                         netif_tx_wake_queue(txq);
8132         }
8133
8134         if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8135                 /* Packets are ready, update Tx producer idx on card. */
8136                 tw32_tx_mbox(tnapi->prodmbox, entry);
8137         }
8138
8139         return NETDEV_TX_OK;
8140
8141 dma_error:
8142         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8143         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8144 drop:
8145         dev_kfree_skb_any(skb);
8146 drop_nofree:
8147         tp->tx_dropped++;
8148         return NETDEV_TX_OK;
8149 }
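
/* Sketch of the stop/wake protocol used above (see also tg3_tx()):
 *
 *	producer (tg3_start_xmit)	consumer (tg3_tx)
 *	-------------------------	-----------------
 *	netif_tx_stop_queue(txq);	update tx_cons;
 *	smp_mb();			smp_mb();
 *	re-check tg3_tx_avail(),	if queue stopped and avail is
 *	wake if above threshold		above threshold, wake it
 *
 * Whichever side runs second observes the other's update, so the
 * queue cannot stay stopped while descriptors are free.
 */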
8150
8151 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8152 {
8153         if (enable) {
8154                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8155                                   MAC_MODE_PORT_MODE_MASK);
8156
8157                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8158
8159                 if (!tg3_flag(tp, 5705_PLUS))
8160                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8161
8162                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8163                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8164                 else
8165                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8166         } else {
8167                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8168
8169                 if (tg3_flag(tp, 5705_PLUS) ||
8170                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8171                     tg3_asic_rev(tp) == ASIC_REV_5700)
8172                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8173         }
8174
8175         tw32(MAC_MODE, tp->mac_mode);
8176         udelay(40);
8177 }
8178
8179 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8180 {
8181         u32 val, bmcr, mac_mode, ptest = 0;
8182
8183         tg3_phy_toggle_apd(tp, false);
8184         tg3_phy_toggle_automdix(tp, false);
8185
8186         if (extlpbk && tg3_phy_set_extloopbk(tp))
8187                 return -EIO;
8188
8189         bmcr = BMCR_FULLDPLX;
8190         switch (speed) {
8191         case SPEED_10:
8192                 break;
8193         case SPEED_100:
8194                 bmcr |= BMCR_SPEED100;
8195                 break;
8196         case SPEED_1000:
8197         default:
8198                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8199                         speed = SPEED_100;
8200                         bmcr |= BMCR_SPEED100;
8201                 } else {
8202                         speed = SPEED_1000;
8203                         bmcr |= BMCR_SPEED1000;
8204                 }
8205         }
8206
8207         if (extlpbk) {
8208                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8209                         tg3_readphy(tp, MII_CTRL1000, &val);
8210                         val |= CTL1000_AS_MASTER |
8211                                CTL1000_ENABLE_MASTER;
8212                         tg3_writephy(tp, MII_CTRL1000, val);
8213                 } else {
8214                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8215                                 MII_TG3_FET_PTEST_TRIM_2;
8216                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8217                 }
8218         } else
8219                 bmcr |= BMCR_LOOPBACK;
8220
8221         tg3_writephy(tp, MII_BMCR, bmcr);
8222
8223         /* The write needs to be flushed for the FETs */
8224         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8225                 tg3_readphy(tp, MII_BMCR, &bmcr);
8226
8227         udelay(40);
8228
8229         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8230             tg3_asic_rev(tp) == ASIC_REV_5785) {
8231                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8232                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8233                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8234
8235                 /* The write needs to be flushed for the AC131 */
8236                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8237         }
8238
8239         /* Reset to prevent losing the 1st rx packet intermittently */
8240         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8241             tg3_flag(tp, 5780_CLASS)) {
8242                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8243                 udelay(10);
8244                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8245         }
8246
8247         mac_mode = tp->mac_mode &
8248                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8249         if (speed == SPEED_1000)
8250                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8251         else
8252                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8253
8254         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8255                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8256
8257                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8258                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8259                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8260                         mac_mode |= MAC_MODE_LINK_POLARITY;
8261
8262                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8263                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8264         }
8265
8266         tw32(MAC_MODE, mac_mode);
8267         udelay(40);
8268
8269         return 0;
8270 }
8271
8272 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8273 {
8274         struct tg3 *tp = netdev_priv(dev);
8275
8276         if (features & NETIF_F_LOOPBACK) {
8277                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8278                         return;
8279
8280                 spin_lock_bh(&tp->lock);
8281                 tg3_mac_loopback(tp, true);
8282                 netif_carrier_on(tp->dev);
8283                 spin_unlock_bh(&tp->lock);
8284                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8285         } else {
8286                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8287                         return;
8288
8289                 spin_lock_bh(&tp->lock);
8290                 tg3_mac_loopback(tp, false);
8291                 /* Force link status check */
8292                 tg3_setup_phy(tp, true);
8293                 spin_unlock_bh(&tp->lock);
8294                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8295         }
8296 }
8297
8298 static netdev_features_t tg3_fix_features(struct net_device *dev,
8299         netdev_features_t features)
8300 {
8301         struct tg3 *tp = netdev_priv(dev);
8302
8303         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8304                 features &= ~NETIF_F_ALL_TSO;
8305
8306         return features;
8307 }
8308
8309 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8310 {
8311         netdev_features_t changed = dev->features ^ features;
8312
8313         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8314                 tg3_set_loopback(dev, features);
8315
8316         return 0;
8317 }
8318
8319 static void tg3_rx_prodring_free(struct tg3 *tp,
8320                                  struct tg3_rx_prodring_set *tpr)
8321 {
8322         int i;
8323
8324         if (tpr != &tp->napi[0].prodring) {
8325                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8326                      i = (i + 1) & tp->rx_std_ring_mask)
8327                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8328                                         tp->rx_pkt_map_sz);
8329
8330                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8331                         for (i = tpr->rx_jmb_cons_idx;
8332                              i != tpr->rx_jmb_prod_idx;
8333                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8334                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8335                                                 TG3_RX_JMB_MAP_SZ);
8336                         }
8337                 }
8338
8339                 return;
8340         }
8341
8342         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8343                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8344                                 tp->rx_pkt_map_sz);
8345
8346         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8347                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8348                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8349                                         TG3_RX_JMB_MAP_SZ);
8350         }
8351 }
8352
8353 /* Initialize rx rings for packet processing.
8354  *
8355  * The chip has been shut down and the driver detached from
8356  * the network stack, so no interrupts or new tx packets will
8357  * end up in the driver.  tp->{tx,}lock are held and thus
8358  * we may not sleep.
8359  */
8360 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8361                                  struct tg3_rx_prodring_set *tpr)
8362 {
8363         u32 i, rx_pkt_dma_sz;
8364
8365         tpr->rx_std_cons_idx = 0;
8366         tpr->rx_std_prod_idx = 0;
8367         tpr->rx_jmb_cons_idx = 0;
8368         tpr->rx_jmb_prod_idx = 0;
8369
8370         if (tpr != &tp->napi[0].prodring) {
8371                 memset(&tpr->rx_std_buffers[0], 0,
8372                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8373                 if (tpr->rx_jmb_buffers)
8374                         memset(&tpr->rx_jmb_buffers[0], 0,
8375                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8376                 goto done;
8377         }
8378
8379         /* Zero out all descriptors. */
8380         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8381
8382         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8383         if (tg3_flag(tp, 5780_CLASS) &&
8384             tp->dev->mtu > ETH_DATA_LEN)
8385                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8386         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8387
8388         /* Initialize invariants of the rings; we only set this
8389          * stuff once.  This works because the card does not
8390          * write into the rx buffer posting rings.
8391          */
8392         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8393                 struct tg3_rx_buffer_desc *rxd;
8394
8395                 rxd = &tpr->rx_std[i];
8396                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8397                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8398                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8399                                (i << RXD_OPAQUE_INDEX_SHIFT));
8400         }
8401
8402         /* Now allocate fresh SKBs for each rx ring. */
8403         for (i = 0; i < tp->rx_pending; i++) {
8404                 unsigned int frag_size;
8405
8406                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8407                                       &frag_size) < 0) {
8408                         netdev_warn(tp->dev,
8409                                     "Using a smaller RX standard ring. Only "
8410                                     "%d out of %d buffers were allocated "
8411                                     "successfully\n", i, tp->rx_pending);
8412                         if (i == 0)
8413                                 goto initfail;
8414                         tp->rx_pending = i;
8415                         break;
8416                 }
8417         }
8418
8419         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8420                 goto done;
8421
8422         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8423
8424         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8425                 goto done;
8426
8427         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8428                 struct tg3_rx_buffer_desc *rxd;
8429
8430                 rxd = &tpr->rx_jmb[i].std;
8431                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8432                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8433                                   RXD_FLAG_JUMBO;
8434                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8435                        (i << RXD_OPAQUE_INDEX_SHIFT));
8436         }
8437
8438         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8439                 unsigned int frag_size;
8440
8441                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8442                                       &frag_size) < 0) {
8443                         netdev_warn(tp->dev,
8444                                     "Using a smaller RX jumbo ring. Only %d "
8445                                     "out of %d buffers were allocated "
8446                                     "successfully\n", i, tp->rx_jumbo_pending);
8447                         if (i == 0)
8448                                 goto initfail;
8449                         tp->rx_jumbo_pending = i;
8450                         break;
8451                 }
8452         }
8453
8454 done:
8455         return 0;
8456
8457 initfail:
8458         tg3_rx_prodring_free(tp, tpr);
8459         return -ENOMEM;
8460 }
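
/* Illustrative note on the opaque fields set above: encoding the ring
 * type and slot, e.g. RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT),
 * lets the completion path recover which producer ring and index a
 * returned descriptor belongs to without extra lookup state.
 */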
8461
8462 static void tg3_rx_prodring_fini(struct tg3 *tp,
8463                                  struct tg3_rx_prodring_set *tpr)
8464 {
8465         kfree(tpr->rx_std_buffers);
8466         tpr->rx_std_buffers = NULL;
8467         kfree(tpr->rx_jmb_buffers);
8468         tpr->rx_jmb_buffers = NULL;
8469         if (tpr->rx_std) {
8470                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8471                                   tpr->rx_std, tpr->rx_std_mapping);
8472                 tpr->rx_std = NULL;
8473         }
8474         if (tpr->rx_jmb) {
8475                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8476                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8477                 tpr->rx_jmb = NULL;
8478         }
8479 }
8480
8481 static int tg3_rx_prodring_init(struct tg3 *tp,
8482                                 struct tg3_rx_prodring_set *tpr)
8483 {
8484         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8485                                       GFP_KERNEL);
8486         if (!tpr->rx_std_buffers)
8487                 return -ENOMEM;
8488
8489         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8490                                          TG3_RX_STD_RING_BYTES(tp),
8491                                          &tpr->rx_std_mapping,
8492                                          GFP_KERNEL);
8493         if (!tpr->rx_std)
8494                 goto err_out;
8495
8496         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8497                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8498                                               GFP_KERNEL);
8499                 if (!tpr->rx_jmb_buffers)
8500                         goto err_out;
8501
8502                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8503                                                  TG3_RX_JMB_RING_BYTES(tp),
8504                                                  &tpr->rx_jmb_mapping,
8505                                                  GFP_KERNEL);
8506                 if (!tpr->rx_jmb)
8507                         goto err_out;
8508         }
8509
8510         return 0;
8511
8512 err_out:
8513         tg3_rx_prodring_fini(tp, tpr);
8514         return -ENOMEM;
8515 }
8516
8517 /* Free up pending packets in all rx/tx rings.
8518  *
8519  * The chip has been shut down and the driver detached from
8520  * the networking stack, so no interrupts or new tx packets will
8521  * end up in the driver.  tp->{tx,}lock is not held and we are not
8522  * in an interrupt context and thus may sleep.
8523  */
8524 static void tg3_free_rings(struct tg3 *tp)
8525 {
8526         int i, j;
8527
8528         for (j = 0; j < tp->irq_cnt; j++) {
8529                 struct tg3_napi *tnapi = &tp->napi[j];
8530
8531                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8532
8533                 if (!tnapi->tx_buffers)
8534                         continue;
8535
8536                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8537                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8538
8539                         if (!skb)
8540                                 continue;
8541
8542                         tg3_tx_skb_unmap(tnapi, i,
8543                                          skb_shinfo(skb)->nr_frags - 1);
8544
8545                         dev_consume_skb_any(skb);
8546                 }
8547                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8548         }
8549 }
8550
8551 /* Initialize tx/rx rings for packet processing.
8552  *
8553  * The chip has been shut down and the driver detached from
8554  * the networking stack, so no interrupts or new tx packets will
8555  * end up in the driver.  tp->{tx,}lock are held and thus
8556  * we may not sleep.
8557  */
8558 static int tg3_init_rings(struct tg3 *tp)
8559 {
8560         int i;
8561
8562         /* Free up all the SKBs. */
8563         tg3_free_rings(tp);
8564
8565         for (i = 0; i < tp->irq_cnt; i++) {
8566                 struct tg3_napi *tnapi = &tp->napi[i];
8567
8568                 tnapi->last_tag = 0;
8569                 tnapi->last_irq_tag = 0;
8570                 tnapi->hw_status->status = 0;
8571                 tnapi->hw_status->status_tag = 0;
8572                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8573
8574                 tnapi->tx_prod = 0;
8575                 tnapi->tx_cons = 0;
8576                 if (tnapi->tx_ring)
8577                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8578
8579                 tnapi->rx_rcb_ptr = 0;
8580                 if (tnapi->rx_rcb)
8581                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8582
8583                 if (tnapi->prodring.rx_std &&
8584                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8585                         tg3_free_rings(tp);
8586                         return -ENOMEM;
8587                 }
8588         }
8589
8590         return 0;
8591 }
8592
8593 static void tg3_mem_tx_release(struct tg3 *tp)
8594 {
8595         int i;
8596
8597         for (i = 0; i < tp->irq_max; i++) {
8598                 struct tg3_napi *tnapi = &tp->napi[i];
8599
8600                 if (tnapi->tx_ring) {
8601                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8602                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8603                         tnapi->tx_ring = NULL;
8604                 }
8605
8606                 kfree(tnapi->tx_buffers);
8607                 tnapi->tx_buffers = NULL;
8608         }
8609 }
8610
8611 static int tg3_mem_tx_acquire(struct tg3 *tp)
8612 {
8613         int i;
8614         struct tg3_napi *tnapi = &tp->napi[0];
8615
8616         /* If multivector TSS is enabled, vector 0 does not handle
8617          * tx interrupts.  Don't allocate any resources for it.
8618          */
8619         if (tg3_flag(tp, ENABLE_TSS))
8620                 tnapi++;
8621
8622         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8623                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8624                                             sizeof(struct tg3_tx_ring_info),
8625                                             GFP_KERNEL);
8626                 if (!tnapi->tx_buffers)
8627                         goto err_out;
8628
8629                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8630                                                     TG3_TX_RING_BYTES,
8631                                                     &tnapi->tx_desc_mapping,
8632                                                     GFP_KERNEL);
8633                 if (!tnapi->tx_ring)
8634                         goto err_out;
8635         }
8636
8637         return 0;
8638
8639 err_out:
8640         tg3_mem_tx_release(tp);
8641         return -ENOMEM;
8642 }
8643
8644 static void tg3_mem_rx_release(struct tg3 *tp)
8645 {
8646         int i;
8647
8648         for (i = 0; i < tp->irq_max; i++) {
8649                 struct tg3_napi *tnapi = &tp->napi[i];
8650
8651                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8652
8653                 if (!tnapi->rx_rcb)
8654                         continue;
8655
8656                 dma_free_coherent(&tp->pdev->dev,
8657                                   TG3_RX_RCB_RING_BYTES(tp),
8658                                   tnapi->rx_rcb,
8659                                   tnapi->rx_rcb_mapping);
8660                 tnapi->rx_rcb = NULL;
8661         }
8662 }
8663
8664 static int tg3_mem_rx_acquire(struct tg3 *tp)
8665 {
8666         unsigned int i, limit;
8667
8668         limit = tp->rxq_cnt;
8669
8670         /* If RSS is enabled, we need one extra (dummy) producer
8671          * ring set on vector zero.  That set is the true hw prodring.
8672          */
8673         if (tg3_flag(tp, ENABLE_RSS))
8674                 limit++;
8675
8676         for (i = 0; i < limit; i++) {
8677                 struct tg3_napi *tnapi = &tp->napi[i];
8678
8679                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8680                         goto err_out;
8681
8682                 /* If multivector RSS is enabled, vector 0
8683                  * does not handle rx or tx interrupts.
8684                  * Don't allocate any resources for it.
8685                  */
8686                 if (!i && tg3_flag(tp, ENABLE_RSS))
8687                         continue;
8688
8689                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8690                                                    TG3_RX_RCB_RING_BYTES(tp),
8691                                                    &tnapi->rx_rcb_mapping,
8692                                                    GFP_KERNEL);
8693                 if (!tnapi->rx_rcb)
8694                         goto err_out;
8695         }
8696
8697         return 0;
8698
8699 err_out:
8700         tg3_mem_rx_release(tp);
8701         return -ENOMEM;
8702 }
8703
8704 /*
8705  * Must only be invoked with interrupt sources disabled and
8706  * the hardware shut down.
8707  */
8708 static void tg3_free_consistent(struct tg3 *tp)
8709 {
8710         int i;
8711
8712         for (i = 0; i < tp->irq_cnt; i++) {
8713                 struct tg3_napi *tnapi = &tp->napi[i];
8714
8715                 if (tnapi->hw_status) {
8716                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8717                                           tnapi->hw_status,
8718                                           tnapi->status_mapping);
8719                         tnapi->hw_status = NULL;
8720                 }
8721         }
8722
8723         tg3_mem_rx_release(tp);
8724         tg3_mem_tx_release(tp);
8725
8726         /* tp->hw_stats can be referenced safely:
8727          *     1. under rtnl_lock
8728          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8729          */
8730         if (tp->hw_stats) {
8731                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8732                                   tp->hw_stats, tp->stats_mapping);
8733                 tp->hw_stats = NULL;
8734         }
8735 }
8736
8737 /*
8738  * Must only be invoked with interrupt sources disabled and
8739  * the hardware shut down.  Can sleep.
8740  */
8741 static int tg3_alloc_consistent(struct tg3 *tp)
8742 {
8743         int i;
8744
8745         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8746                                           sizeof(struct tg3_hw_stats),
8747                                           &tp->stats_mapping, GFP_KERNEL);
8748         if (!tp->hw_stats)
8749                 goto err_out;
8750
8751         for (i = 0; i < tp->irq_cnt; i++) {
8752                 struct tg3_napi *tnapi = &tp->napi[i];
8753                 struct tg3_hw_status *sblk;
8754
8755                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8756                                                       TG3_HW_STATUS_SIZE,
8757                                                       &tnapi->status_mapping,
8758                                                       GFP_KERNEL);
8759                 if (!tnapi->hw_status)
8760                         goto err_out;
8761
8762                 sblk = tnapi->hw_status;
8763
8764                 if (tg3_flag(tp, ENABLE_RSS)) {
8765                         u16 *prodptr = NULL;
8766
8767                         /*
8768                          * When RSS is enabled, the status block format changes
8769                          * slightly.  The "rx_jumbo_consumer", "reserved",
8770                          * and "rx_mini_consumer" members get mapped to the
8771                          * other three rx return ring producer indexes.
8772                          */
8773                         switch (i) {
8774                         case 1:
8775                                 prodptr = &sblk->idx[0].rx_producer;
8776                                 break;
8777                         case 2:
8778                                 prodptr = &sblk->rx_jumbo_consumer;
8779                                 break;
8780                         case 3:
8781                                 prodptr = &sblk->reserved;
8782                                 break;
8783                         case 4:
8784                                 prodptr = &sblk->rx_mini_consumer;
8785                                 break;
8786                         }
8787                         tnapi->rx_rcb_prod_idx = prodptr;
8788                 } else {
8789                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8790                 }
8791         }
8792
8793         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8794                 goto err_out;
8795
8796         return 0;
8797
8798 err_out:
8799         tg3_free_consistent(tp);
8800         return -ENOMEM;
8801 }
8802
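/* Upper bound for the 100 usec polls below: each block gets up to
 * 1000 * 100 usec = ~100 ms to report that it has stopped.
 */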
8803 #define MAX_WAIT_CNT 1000
8804
8805 /* To stop a block, clear the enable bit and poll until it
8806  * clears.  tp->lock is held.
8807  */
8808 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8809 {
8810         unsigned int i;
8811         u32 val;
8812
8813         if (tg3_flag(tp, 5705_PLUS)) {
8814                 switch (ofs) {
8815                 case RCVLSC_MODE:
8816                 case DMAC_MODE:
8817                 case MBFREE_MODE:
8818                 case BUFMGR_MODE:
8819                 case MEMARB_MODE:
8820                         /* We can't enable/disable these bits on the
8821                          * 5705/5750, so just report success.
8822                          */
8823                         return 0;
8824
8825                 default:
8826                         break;
8827                 }
8828         }
8829
8830         val = tr32(ofs);
8831         val &= ~enable_bit;
8832         tw32_f(ofs, val);
8833
8834         for (i = 0; i < MAX_WAIT_CNT; i++) {
8835                 if (pci_channel_offline(tp->pdev)) {
8836                         dev_err(&tp->pdev->dev,
8837                                 "tg3_stop_block device offline, "
8838                                 "ofs=%lx enable_bit=%x\n",
8839                                 ofs, enable_bit);
8840                         return -ENODEV;
8841                 }
8842
8843                 udelay(100);
8844                 val = tr32(ofs);
8845                 if ((val & enable_bit) == 0)
8846                         break;
8847         }
8848
8849         if (i == MAX_WAIT_CNT && !silent) {
8850                 dev_err(&tp->pdev->dev,
8851                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8852                         ofs, enable_bit);
8853                 return -ENODEV;
8854         }
8855
8856         return 0;
8857 }
8858
8859 /* tp->lock is held. */
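/* Quiesce the chip: stop the receive blocks first so no new traffic
 * lands, then the send blocks, and finally host coalescing, the
 * buffer manager and the memory arbiter.
 */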
8860 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8861 {
8862         int i, err;
8863
8864         tg3_disable_ints(tp);
8865
8866         if (pci_channel_offline(tp->pdev)) {
8867                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8868                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8869                 err = -ENODEV;
8870                 goto err_no_dev;
8871         }
8872
8873         tp->rx_mode &= ~RX_MODE_ENABLE;
8874         tw32_f(MAC_RX_MODE, tp->rx_mode);
8875         udelay(10);
8876
8877         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8878         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8879         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8880         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8881         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8882         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8883
8884         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8885         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8886         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8887         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8888         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8889         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8890         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8891
8892         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8893         tw32_f(MAC_MODE, tp->mac_mode);
8894         udelay(40);
8895
8896         tp->tx_mode &= ~TX_MODE_ENABLE;
8897         tw32_f(MAC_TX_MODE, tp->tx_mode);
8898
8899         for (i = 0; i < MAX_WAIT_CNT; i++) {
8900                 udelay(100);
8901                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8902                         break;
8903         }
8904         if (i >= MAX_WAIT_CNT) {
8905                 dev_err(&tp->pdev->dev,
8906                         "%s timed out, TX_MODE_ENABLE will not clear "
8907                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8908                 err |= -ENODEV;
8909         }
8910
8911         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8912         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8913         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8914
8915         tw32(FTQ_RESET, 0xffffffff);
8916         tw32(FTQ_RESET, 0x00000000);
8917
8918         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8919         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8920
8921 err_no_dev:
8922         for (i = 0; i < tp->irq_cnt; i++) {
8923                 struct tg3_napi *tnapi = &tp->napi[i];
8924                 if (tnapi->hw_status)
8925                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8926         }
8927
8928         return err;
8929 }
8930
8931 /* Save PCI command register before chip reset */
8932 static void tg3_save_pci_state(struct tg3 *tp)
8933 {
8934         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8935 }
8936
8937 /* Restore PCI state after chip reset */
8938 static void tg3_restore_pci_state(struct tg3 *tp)
8939 {
8940         u32 val;
8941
8942         /* Re-enable indirect register accesses. */
8943         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8944                                tp->misc_host_ctrl);
8945
8946         /* Set MAX PCI retry to zero. */
8947         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8948         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8949             tg3_flag(tp, PCIX_MODE))
8950                 val |= PCISTATE_RETRY_SAME_DMA;
8951         /* Allow reads and writes to the APE register and memory space. */
8952         if (tg3_flag(tp, ENABLE_APE))
8953                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8954                        PCISTATE_ALLOW_APE_SHMEM_WR |
8955                        PCISTATE_ALLOW_APE_PSPACE_WR;
8956         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8957
8958         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8959
8960         if (!tg3_flag(tp, PCI_EXPRESS)) {
8961                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8962                                       tp->pci_cacheline_sz);
8963                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8964                                       tp->pci_lat_timer);
8965         }
8966
8967         /* Make sure PCI-X relaxed ordering bit is clear. */
8968         if (tg3_flag(tp, PCIX_MODE)) {
8969                 u16 pcix_cmd;
8970
8971                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8972                                      &pcix_cmd);
8973                 pcix_cmd &= ~PCI_X_CMD_ERO;
8974                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8975                                       pcix_cmd);
8976         }
8977
8978         if (tg3_flag(tp, 5780_CLASS)) {
8979
8980                 /* Chip reset on 5780 will reset the MSI enable bit,
8981                  * so we need to restore it.
8982                  */
8983                 if (tg3_flag(tp, USING_MSI)) {
8984                         u16 ctrl;
8985
8986                         pci_read_config_word(tp->pdev,
8987                                              tp->msi_cap + PCI_MSI_FLAGS,
8988                                              &ctrl);
8989                         pci_write_config_word(tp->pdev,
8990                                               tp->msi_cap + PCI_MSI_FLAGS,
8991                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8992                         val = tr32(MSGINT_MODE);
8993                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8994                 }
8995         }
8996 }
8997
8998 static void tg3_override_clk(struct tg3 *tp)
8999 {
9000         u32 val;
9001
9002         switch (tg3_asic_rev(tp)) {
9003         case ASIC_REV_5717:
9004                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9005                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9006                      TG3_CPMU_MAC_ORIDE_ENABLE);
9007                 break;
9008
9009         case ASIC_REV_5719:
9010         case ASIC_REV_5720:
9011                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9012                 break;
9013
9014         default:
9015                 return;
9016         }
9017 }
9018
9019 static void tg3_restore_clk(struct tg3 *tp)
9020 {
9021         u32 val;
9022
9023         switch (tg3_asic_rev(tp)) {
9024         case ASIC_REV_5717:
9025                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9026                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9027                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9028                 break;
9029
9030         case ASIC_REV_5719:
9031         case ASIC_REV_5720:
9032                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9033                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9034                 break;
9035
9036         default:
9037                 return;
9038         }
9039 }
9040
9041 /* tp->lock is held. */
9042 static int tg3_chip_reset(struct tg3 *tp)
9043         __releases(tp->lock)
9044         __acquires(tp->lock)
9045 {
9046         u32 val;
9047         void (*write_op)(struct tg3 *, u32, u32);
9048         int i, err;
9049
9050         if (!pci_device_is_present(tp->pdev))
9051                 return -ENODEV;
9052
9053         tg3_nvram_lock(tp);
9054
9055         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9056
9057         /* No matching tg3_nvram_unlock() after this because
9058          * the chip reset below will undo the nvram lock.
9059          */
9060         tp->nvram_lock_cnt = 0;
9061
9062         /* GRC_MISC_CFG core clock reset will clear the memory
9063          * enable bit in PCI register 4 and the MSI enable bit
9064          * on some chips, so we save relevant registers here.
9065          */
9066         tg3_save_pci_state(tp);
9067
9068         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9069             tg3_flag(tp, 5755_PLUS))
9070                 tw32(GRC_FASTBOOT_PC, 0);
9071
9072         /*
9073          * We must avoid the readl() that normally takes place.
9074          * It locks up machines, causes machine checks, and does
9075          * other fun things.  So, temporarily disable the 5701
9076          * hardware workaround while we do the reset.
9077          */
9078         write_op = tp->write32;
9079         if (write_op == tg3_write_flush_reg32)
9080                 tp->write32 = tg3_write32;
9081
9082         /* Prevent the irq handler from reading or writing PCI registers
9083          * during chip reset when the memory enable bit in the PCI command
9084          * register may be cleared.  The chip does not generate interrupts
9085          * at this time, but the irq handler may still be called due to irq
9086          * sharing or irqpoll.
9087          */
9088         tg3_flag_set(tp, CHIP_RESETTING);
9089         for (i = 0; i < tp->irq_cnt; i++) {
9090                 struct tg3_napi *tnapi = &tp->napi[i];
9091                 if (tnapi->hw_status) {
9092                         tnapi->hw_status->status = 0;
9093                         tnapi->hw_status->status_tag = 0;
9094                 }
9095                 tnapi->last_tag = 0;
9096                 tnapi->last_irq_tag = 0;
9097         }
9098         smp_mb();
9099
9100         tg3_full_unlock(tp);
9101
9102         for (i = 0; i < tp->irq_cnt; i++)
9103                 synchronize_irq(tp->napi[i].irq_vec);
9104
9105         tg3_full_lock(tp, 0);
9106
9107         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9108                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9109                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9110         }
9111
9112         /* do the reset */
9113         val = GRC_MISC_CFG_CORECLK_RESET;
9114
9115         if (tg3_flag(tp, PCI_EXPRESS)) {
9116                 /* Force PCIe 1.0a mode */
9117                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9118                     !tg3_flag(tp, 57765_PLUS) &&
9119                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9120                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9121                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9122
9123                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9124                         tw32(GRC_MISC_CFG, (1 << 29));
9125                         val |= (1 << 29);
9126                 }
9127         }
9128
9129         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9130                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9131                 tw32(GRC_VCPU_EXT_CTRL,
9132                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9133         }
9134
9135         /* Set the clock to the highest frequency to avoid timeouts. With link
9136          * aware mode, the clock speed could be slow and the bootcode may not
9137          * complete within the expected time. Override the clock to allow the
9138          * bootcode to finish sooner and then restore it.
9139          */
9140         tg3_override_clk(tp);
9141
9142         /* Manage GPHY power for all CPMU-absent PCIe devices. */
9143         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9144                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9145
9146         tw32(GRC_MISC_CFG, val);
9147
9148         /* restore 5701 hardware bug workaround write method */
9149         tp->write32 = write_op;
9150
9151         /* Unfortunately, we have to delay before the PCI read back.
9152          * Some 575X chips will not even respond to a PCI cfg access
9153          * when the reset command is given to the chip.
9154          *
9155          * How do these hardware designers expect things to work
9156          * properly if the PCI write is posted for a long period
9157          * of time?  It is always necessary to have some method by
9158          * which a register read back can occur to push the write
9159          * out which does the reset.
9160          *
9161          * For most tg3 variants the trick below has worked.
9162          * Ho hum...
9163          */
9164         udelay(120);
9165
9166         /* Flush PCI posted writes.  The normal MMIO registers
9167          * are inaccessible at this time so this is the only
9168          * way to do this reliably (actually, this is no longer
9169          * the case, see above).  I tried to use indirect
9170          * register read/write but this upset some 5701 variants.
9171          */
9172         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9173
9174         udelay(120);
9175
9176         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9177                 u16 val16;
9178
9179                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9180                         int j;
9181                         u32 cfg_val;
9182
9183                         /* Wait for link training to complete.  */
9184                         for (j = 0; j < 5000; j++)
9185                                 udelay(100);
9186
9187                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9188                         pci_write_config_dword(tp->pdev, 0xc4,
9189                                                cfg_val | (1 << 15));
9190                 }
9191
9192                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9193                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9194                 /*
9195                  * Older PCIe devices only support the 128 byte
9196                  * MPS setting.  Enforce the restriction.
9197                  */
9198                 if (!tg3_flag(tp, CPMU_PRESENT))
9199                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9200                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9201
9202                 /* Clear error status */
9203                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9204                                       PCI_EXP_DEVSTA_CED |
9205                                       PCI_EXP_DEVSTA_NFED |
9206                                       PCI_EXP_DEVSTA_FED |
9207                                       PCI_EXP_DEVSTA_URD);
9208         }
9209
9210         tg3_restore_pci_state(tp);
9211
9212         tg3_flag_clear(tp, CHIP_RESETTING);
9213         tg3_flag_clear(tp, ERROR_PROCESSED);
9214
9215         val = 0;
9216         if (tg3_flag(tp, 5780_CLASS))
9217                 val = tr32(MEMARB_MODE);
9218         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9219
9220         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9221                 tg3_stop_fw(tp);
9222                 tw32(0x5000, 0x400);
9223         }
9224
9225         if (tg3_flag(tp, IS_SSB_CORE)) {
9226                 /*
9227                  * BCM4785: Stop the Rx RISC CPU, which is not required,
9228                  * to avoid repercussions from using the potentially
9229                  * defective internal ROM.
9230                  */
9231                 tg3_stop_fw(tp);
9232                 tg3_halt_cpu(tp, RX_CPU_BASE);
9233         }
9234
9235         err = tg3_poll_fw(tp);
9236         if (err)
9237                 return err;
9238
9239         tw32(GRC_MODE, tp->grc_mode);
9240
9241         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9242                 val = tr32(0xc4);
9243
9244                 tw32(0xc4, val | (1 << 15));
9245         }
9246
9247         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9248             tg3_asic_rev(tp) == ASIC_REV_5705) {
9249                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9250                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9251                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9252                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9253         }
9254
9255         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9256                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9257                 val = tp->mac_mode;
9258         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9259                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9260                 val = tp->mac_mode;
9261         } else
9262                 val = 0;
9263
9264         tw32_f(MAC_MODE, val);
9265         udelay(40);
9266
9267         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9268
9269         tg3_mdio_start(tp);
9270
9271         if (tg3_flag(tp, PCI_EXPRESS) &&
9272             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9273             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9274             !tg3_flag(tp, 57765_PLUS)) {
9275                 val = tr32(0x7c00);
9276
9277                 tw32(0x7c00, val | (1 << 25));
9278         }
9279
9280         tg3_restore_clk(tp);
9281
9282         /* Increase the core clock speed to fix a tx timeout issue on the
9283          * 5762 at 100Mbps link speed.
9284          */
9285         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9286                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9287                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9288                      TG3_CPMU_MAC_ORIDE_ENABLE);
9289         }
9290
9291         /* Reprobe ASF enable state.  */
9292         tg3_flag_clear(tp, ENABLE_ASF);
9293         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9294                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9295
9296         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9297         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9298         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9299                 u32 nic_cfg;
9300
9301                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9302                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9303                         tg3_flag_set(tp, ENABLE_ASF);
9304                         tp->last_event_jiffies = jiffies;
9305                         if (tg3_flag(tp, 5750_PLUS))
9306                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9307
9308                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9309                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9310                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9311                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9312                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9313                 }
9314         }
9315
9316         return 0;
9317 }
9318
9319 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9320 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9321 static void __tg3_set_rx_mode(struct net_device *);
9322
9323 /* tp->lock is held. */
9324 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9325 {
9326         int err;
9327
9328         tg3_stop_fw(tp);
9329
9330         tg3_write_sig_pre_reset(tp, kind);
9331
9332         tg3_abort_hw(tp, silent);
9333         err = tg3_chip_reset(tp);
9334
9335         __tg3_set_mac_addr(tp, false);
9336
9337         tg3_write_sig_legacy(tp, kind);
9338         tg3_write_sig_post_reset(tp, kind);
9339
9340         if (tp->hw_stats) {
9341                 /* Save the stats across chip resets... */
9342                 tg3_get_nstats(tp, &tp->net_stats_prev);
9343                 tg3_get_estats(tp, &tp->estats_prev);
9344
9345                 /* And make sure the next sample is new data */
9346                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9347         }
9348
9349         return err;
9350 }
9351
9352 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9353 {
9354         struct tg3 *tp = netdev_priv(dev);
9355         struct sockaddr *addr = p;
9356         int err = 0;
9357         bool skip_mac_1 = false;
9358
9359         if (!is_valid_ether_addr(addr->sa_data))
9360                 return -EADDRNOTAVAIL;
9361
9362         eth_hw_addr_set(dev, addr->sa_data);
9363
9364         if (!netif_running(dev))
9365                 return 0;
9366
9367         if (tg3_flag(tp, ENABLE_ASF)) {
9368                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9369
9370                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9371                 addr0_low = tr32(MAC_ADDR_0_LOW);
9372                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9373                 addr1_low = tr32(MAC_ADDR_1_LOW);
9374
9375                 /* Skip MAC addr 1 if ASF is using it. */
9376                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9377                     !(addr1_high == 0 && addr1_low == 0))
9378                         skip_mac_1 = true;
9379         }
9380         spin_lock_bh(&tp->lock);
9381         __tg3_set_mac_addr(tp, skip_mac_1);
9382         __tg3_set_rx_mode(dev);
9383         spin_unlock_bh(&tp->lock);
9384
9385         return err;
9386 }
9387
9388 /* tp->lock is held. */
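/* A BDINFO block in NIC SRAM describes one ring: the 64-bit host DMA
 * address (high word first), a maxlen/flags word and, on pre-5705
 * chips only, the ring's address within NIC-local memory.
 */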
9389 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9390                            dma_addr_t mapping, u32 maxlen_flags,
9391                            u32 nic_addr)
9392 {
9393         tg3_write_mem(tp,
9394                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9395                       ((u64) mapping >> 32));
9396         tg3_write_mem(tp,
9397                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9398                       ((u64) mapping & 0xffffffff));
9399         tg3_write_mem(tp,
9400                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9401                        maxlen_flags);
9402
9403         if (!tg3_flag(tp, 5705_PLUS))
9404                 tg3_write_mem(tp,
9405                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9406                               nic_addr);
9407 }
9408
9409
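/* With TSS enabled, the global tx coalescing registers are zeroed and
 * each tx ring is programmed through its own per-vector register set,
 * spaced 0x18 bytes apart starting at the VEC1 offsets.  Registers for
 * unused vectors are always zeroed.
 */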
9410 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9411 {
9412         int i = 0;
9413
9414         if (!tg3_flag(tp, ENABLE_TSS)) {
9415                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9416                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9417                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9418         } else {
9419                 tw32(HOSTCC_TXCOL_TICKS, 0);
9420                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9421                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9422
9423                 for (; i < tp->txq_cnt; i++) {
9424                         u32 reg;
9425
9426                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9427                         tw32(reg, ec->tx_coalesce_usecs);
9428                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9429                         tw32(reg, ec->tx_max_coalesced_frames);
9430                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9431                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9432                 }
9433         }
9434
9435         for (; i < tp->irq_max - 1; i++) {
9436                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9437                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9438                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9439         }
9440 }
9441
9442 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9443 {
9444         int i = 0;
9445         u32 limit = tp->rxq_cnt;
9446
9447         if (!tg3_flag(tp, ENABLE_RSS)) {
9448                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9449                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9450                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9451                 limit--;
9452         } else {
9453                 tw32(HOSTCC_RXCOL_TICKS, 0);
9454                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9455                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9456         }
9457
9458         for (; i < limit; i++) {
9459                 u32 reg;
9460
9461                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9462                 tw32(reg, ec->rx_coalesce_usecs);
9463                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9464                 tw32(reg, ec->rx_max_coalesced_frames);
9465                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9466                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9467         }
9468
9469         for (; i < tp->irq_max - 1; i++) {
9470                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9471                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9472                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9473         }
9474 }
9475
9476 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9477 {
9478         tg3_coal_tx_init(tp, ec);
9479         tg3_coal_rx_init(tp, ec);
9480
9481         if (!tg3_flag(tp, 5705_PLUS)) {
9482                 u32 val = ec->stats_block_coalesce_usecs;
9483
9484                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9485                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9486
9487                 if (!tp->link_up)
9488                         val = 0;
9489
9490                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9491         }
9492 }
9493
9494 /* tp->lock is held. */
9495 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9496 {
9497         u32 txrcb, limit;
9498
9499         /* Disable all transmit rings but the first. */
9500         if (!tg3_flag(tp, 5705_PLUS))
9501                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9502         else if (tg3_flag(tp, 5717_PLUS))
9503                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9504         else if (tg3_flag(tp, 57765_CLASS) ||
9505                  tg3_asic_rev(tp) == ASIC_REV_5762)
9506                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9507         else
9508                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9509
9510         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9511              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9512                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9513                               BDINFO_FLAGS_DISABLED);
9514 }
9515
9516 /* tp->lock is held. */
9517 static void tg3_tx_rcbs_init(struct tg3 *tp)
9518 {
9519         int i = 0;
9520         u32 txrcb = NIC_SRAM_SEND_RCB;
9521
9522         if (tg3_flag(tp, ENABLE_TSS))
9523                 i++;
9524
9525         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9526                 struct tg3_napi *tnapi = &tp->napi[i];
9527
9528                 if (!tnapi->tx_ring)
9529                         continue;
9530
9531                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9532                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9533                                NIC_SRAM_TX_BUFFER_DESC);
9534         }
9535 }
9536
9537 /* tp->lock is held. */
9538 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9539 {
9540         u32 rxrcb, limit;
9541
9542         /* Disable all receive return rings but the first. */
9543         if (tg3_flag(tp, 5717_PLUS))
9544                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9545         else if (!tg3_flag(tp, 5705_PLUS))
9546                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9547         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9548                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9549                  tg3_flag(tp, 57765_CLASS))
9550                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9551         else
9552                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9553
9554         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9555              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9556                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9557                               BDINFO_FLAGS_DISABLED);
9558 }
9559
9560 /* tp->lock is held. */
9561 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9562 {
9563         int i = 0;
9564         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9565
9566         if (tg3_flag(tp, ENABLE_RSS))
9567                 i++;
9568
9569         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9570                 struct tg3_napi *tnapi = &tp->napi[i];
9571
9572                 if (!tnapi->rx_rcb)
9573                         continue;
9574
9575                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9576                                (tp->rx_ret_ring_mask + 1) <<
9577                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9578         }
9579 }
9580
9581 /* tp->lock is held. */
9582 static void tg3_rings_reset(struct tg3 *tp)
9583 {
9584         int i;
9585         u32 stblk;
9586         struct tg3_napi *tnapi = &tp->napi[0];
9587
9588         tg3_tx_rcbs_disable(tp);
9589
9590         tg3_rx_ret_rcbs_disable(tp);
9591
9592         /* Disable interrupts */
9593         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9594         tp->napi[0].chk_msi_cnt = 0;
9595         tp->napi[0].last_rx_cons = 0;
9596         tp->napi[0].last_tx_cons = 0;
9597
9598         /* Zero mailbox registers. */
9599         if (tg3_flag(tp, SUPPORT_MSIX)) {
9600                 for (i = 1; i < tp->irq_max; i++) {
9601                         tp->napi[i].tx_prod = 0;
9602                         tp->napi[i].tx_cons = 0;
9603                         if (tg3_flag(tp, ENABLE_TSS))
9604                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9605                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9606                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9607                         tp->napi[i].chk_msi_cnt = 0;
9608                         tp->napi[i].last_rx_cons = 0;
9609                         tp->napi[i].last_tx_cons = 0;
9610                 }
9611                 if (!tg3_flag(tp, ENABLE_TSS))
9612                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9613         } else {
9614                 tp->napi[0].tx_prod = 0;
9615                 tp->napi[0].tx_cons = 0;
9616                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9617                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9618         }
9619
9620         /* Make sure the NIC-based send BD rings are disabled. */
9621         if (!tg3_flag(tp, 5705_PLUS)) {
9622                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9623                 for (i = 0; i < 16; i++)
9624                         tw32_tx_mbox(mbox + i * 8, 0);
9625         }
9626
9627         /* Clear status block in ram. */
9628         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9629
9630         /* Set status block DMA address */
9631         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9632              ((u64) tnapi->status_mapping >> 32));
9633         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9634              ((u64) tnapi->status_mapping & 0xffffffff));
9635
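        /* Status block DMA addresses for the remaining vectors occupy
         * consecutive 8-byte register pairs starting at
         * HOSTCC_STATBLCK_RING1.
         */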
9636         stblk = HOSTCC_STATBLCK_RING1;
9637
9638         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9639                 u64 mapping = (u64)tnapi->status_mapping;
9640                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9641                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9642                 stblk += 8;
9643
9644                 /* Clear status block in ram. */
9645                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9646         }
9647
9648         tg3_tx_rcbs_init(tp);
9649         tg3_rx_ret_rcbs_init(tp);
9650 }
9651
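/* Program the rx BD replenish thresholds, roughly the lesser of half
 * the chip's BD cache (capped at rx_std_max_post) and one eighth of
 * the configured ring size, but at least one buffer.
 */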
9652 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9653 {
9654         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9655
9656         if (!tg3_flag(tp, 5750_PLUS) ||
9657             tg3_flag(tp, 5780_CLASS) ||
9658             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9659             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9660             tg3_flag(tp, 57765_PLUS))
9661                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9662         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9663                  tg3_asic_rev(tp) == ASIC_REV_5787)
9664                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9665         else
9666                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9667
9668         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9669         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9670
9671         val = min(nic_rep_thresh, host_rep_thresh);
9672         tw32(RCVBDI_STD_THRESH, val);
9673
9674         if (tg3_flag(tp, 57765_PLUS))
9675                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9676
9677         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9678                 return;
9679
9680         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9681
9682         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9683
9684         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9685         tw32(RCVBDI_JUMBO_THRESH, val);
9686
9687         if (tg3_flag(tp, 57765_PLUS))
9688                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9689 }
9690
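/* Bitwise little-endian (reflected) CRC-32 over buf.  The multicast
 * hash below uses the low seven bits of the complemented result:
 * bits 6:5 select one of the four MAC_HASH_REG registers and bits 4:0
 * select the bit within it; see __tg3_set_rx_mode().
 */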
9691 static inline u32 calc_crc(unsigned char *buf, int len)
9692 {
9693         u32 reg;
9694         u32 tmp;
9695         int j, k;
9696
9697         reg = 0xffffffff;
9698
9699         for (j = 0; j < len; j++) {
9700                 reg ^= buf[j];
9701
9702                 for (k = 0; k < 8; k++) {
9703                         tmp = reg & 0x01;
9704
9705                         reg >>= 1;
9706
9707                         if (tmp)
9708                                 reg ^= CRC32_POLY_LE;
9709                 }
9710         }
9711
9712         return ~reg;
9713 }
9714
9715 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9716 {
9717         /* accept or reject all multicast frames */
9718         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9719         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9720         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9721         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9722 }
9723
9724 static void __tg3_set_rx_mode(struct net_device *dev)
9725 {
9726         struct tg3 *tp = netdev_priv(dev);
9727         u32 rx_mode;
9728
9729         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9730                                   RX_MODE_KEEP_VLAN_TAG);
9731
9732 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9733         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9734          * flag clear.
9735          */
9736         if (!tg3_flag(tp, ENABLE_ASF))
9737                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9738 #endif
9739
9740         if (dev->flags & IFF_PROMISC) {
9741                 /* Promiscuous mode. */
9742                 rx_mode |= RX_MODE_PROMISC;
9743         } else if (dev->flags & IFF_ALLMULTI) {
9744                 /* Accept all multicast. */
9745                 tg3_set_multi(tp, 1);
9746         } else if (netdev_mc_empty(dev)) {
9747                 /* Reject all multicast. */
9748                 tg3_set_multi(tp, 0);
9749         } else {
9750                 /* Accept one or more multicast(s). */
9751                 struct netdev_hw_addr *ha;
9752                 u32 mc_filter[4] = { 0, };
9753                 u32 regidx;
9754                 u32 bit;
9755                 u32 crc;
9756
9757                 netdev_for_each_mc_addr(ha, dev) {
9758                         crc = calc_crc(ha->addr, ETH_ALEN);
9759                         bit = ~crc & 0x7f;
9760                         regidx = (bit & 0x60) >> 5;
9761                         bit &= 0x1f;
9762                         mc_filter[regidx] |= (1 << bit);
9763                 }
9764
9765                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9766                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9767                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9768                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9769         }
9770
9771         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9772                 rx_mode |= RX_MODE_PROMISC;
9773         } else if (!(dev->flags & IFF_PROMISC)) {
9774                 /* Add all entries into the MAC address filter list */
9775                 int i = 0;
9776                 struct netdev_hw_addr *ha;
9777
9778                 netdev_for_each_uc_addr(ha, dev) {
9779                         __tg3_set_one_mac_addr(tp, ha->addr,
9780                                                i + TG3_UCAST_ADDR_IDX(tp));
9781                         i++;
9782                 }
9783         }
9784
9785         if (rx_mode != tp->rx_mode) {
9786                 tp->rx_mode = rx_mode;
9787                 tw32_f(MAC_RX_MODE, rx_mode);
9788                 udelay(10);
9789         }
9790 }
9791
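/* Spread the rx queues across the indirection table round-robin using
 * the generic ethtool default mapping (entry i maps to queue i % qcnt).
 */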
9792 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9793 {
9794         int i;
9795
9796         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9797                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9798 }
9799
9800 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9801 {
9802         int i;
9803
9804         if (!tg3_flag(tp, SUPPORT_MSIX))
9805                 return;
9806
9807         if (tp->rxq_cnt == 1) {
9808                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9809                 return;
9810         }
9811
9812         /* Validate table against current IRQ count */
9813         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9814                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9815                         break;
9816         }
9817
9818         if (i != TG3_RSS_INDIR_TBL_SIZE)
9819                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9820 }
9821
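/* Each 32-bit indirection table register packs eight 4-bit ring
 * indices, most significant nibble first; e.g. table entries
 * {0,1,2,3,0,1,2,3} are written as 0x01230123.
 */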
9822 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9823 {
9824         int i = 0;
9825         u32 reg = MAC_RSS_INDIR_TBL_0;
9826
9827         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9828                 u32 val = tp->rss_ind_tbl[i];
9829                 i++;
9830                 for (; i % 8; i++) {
9831                         val <<= 4;
9832                         val |= tp->rss_ind_tbl[i];
9833                 }
9834                 tw32(reg, val);
9835                 reg += 4;
9836         }
9837 }
9838
9839 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9840 {
9841         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9842                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9843         else
9844                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9845 }
9846
9847 /* tp->lock is held. */
9848 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9849 {
9850         u32 val, rdmac_mode;
9851         int i, err, limit;
9852         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9853
9854         tg3_disable_ints(tp);
9855
9856         tg3_stop_fw(tp);
9857
9858         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9859
9860         if (tg3_flag(tp, INIT_COMPLETE))
9861                 tg3_abort_hw(tp, true);
9862
9863         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9864             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9865                 tg3_phy_pull_config(tp);
9866                 tg3_eee_pull_config(tp, NULL);
9867                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9868         }
9869
9870         /* Enable MAC control of LPI */
9871         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9872                 tg3_setup_eee(tp);
9873
9874         if (reset_phy)
9875                 tg3_phy_reset(tp);
9876
9877         err = tg3_chip_reset(tp);
9878         if (err)
9879                 return err;
9880
9881         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9882
9883         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9884                 val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to set up the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* On at least one AMD platform, MRRS is restricted to 4000 because
	 * of a south bridge limitation.  As a workaround, the driver sets
	 * MRRS to 2048 instead of the default 4096.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	}

	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
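		/* Round the firmware image length up to the next 128-byte
		 * boundary; the mbuf pool carved out of NIC SRAM below
		 * starts right after the firmware, so it presumably needs
		 * this alignment.
		 */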
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
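	/* The buffer manager may take a moment to come up after a reset;
	 * poll the enable bit for up to 20 ms (2000 * 10 us) before
	 * giving up on the device.
	 */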
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate the RDMAC_MODE setting early; we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE)) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors from the firmware download.  If the
		 * download fails, the device will operate with EEE disabled.
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		u32 rss_key[10];

		tg3_rss_write_indir_tbl(tp);

		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));

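		/* Program the 40-byte RSS hash key one 32-bit word at a
		 * time; the ten key registers appear to be laid out
		 * contiguously starting at MAC_RSS_HASH_KEY_0.
		 */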
		for (i = 0; i < 10; i++)
			tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
	}

	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
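	/* Zero the unused rule slots.  Each case falls through, so
	 * entering at the active limit clears every rule from
	 * (limit - 1) down to 4; rules 0 and 1 were programmed above,
	 * and slots 2 and 3 are intentionally left alone.
	 */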
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
		fallthrough;
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
		fallthrough;
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
		fallthrough;
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
		fallthrough;
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
		fallthrough;
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
		fallthrough;
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
		fallthrough;
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
		fallthrough;
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
		fallthrough;
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
		fallthrough;
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
		fallthrough;
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
		fallthrough;
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_5SEC);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}

#ifdef CONFIG_TIGON3_HWMON
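/* Copy each OCIR record out of the APE scratchpad.  Records that lack
 * the expected signature or the ACTIVE flag are zeroed so that callers
 * can treat them as empty.
 */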
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	u32 off, len = TG3_OCIR_LEN;
	int i;

	for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, len);
	}
}

/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct tg3 *tp = dev_get_drvdata(dev);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
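	/* The hwmon ABI wants temperatures in millidegrees Celsius; the
	 * APE value is presumably reported in whole degrees.
	 */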
	return sprintf(buf, "%u\n", temperature * 1000);
}


static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);

static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
	}
}

static void tg3_hwmon_open(struct tg3 *tp)
{
	int i;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
							  tp, tg3_groups);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
	}
}
#else
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
#endif /* CONFIG_TIGON3_HWMON */


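/* Fold a 32-bit hardware statistics register into a 64-bit software
 * counter: add the register value to the low word and carry into the
 * high word whenever the addition wraps.
 */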
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)

static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}

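/* MSI interrupts can occasionally go missing.  If a vector has work
 * pending but its consumer indices have not advanced for two
 * consecutive timer ticks, invoke the MSI handler by hand to unwedge
 * the queue.
 */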
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}

static void tg3_timer(struct timer_list *t)
{
	struct tg3 *tp = from_timer(tp, t, timer);

	spin_lock(&tp->lock);

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this is necessary because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the CPU is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real-time kernels,
	 * where the timer doesn't run on time.  Netpoll has the
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	/* Update the APE heartbeat every 5 seconds. */
	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

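/* The base timer tick is one second when tagged status blocks make the
 * non-tagged IRQ workaround in tg3_timer() unnecessary, and 100 ms
 * otherwise.  timer_multiplier converts ticks into the once-per-second
 * work, and asf_multiplier into the ASF heartbeat interval.
 */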
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	timer_setup(&tp->timer, tg3_timer, 0);
}

static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}

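/* Process-context reset handler, run from the shared workqueue (e.g.
 * after a TX timeout).  It fully halts and re-initializes the chip,
 * holding the RTNL lock so the device cannot be reconfigured or
 * closed underneath it.
 */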
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	rtnl_lock();
	tg3_full_lock(tp, 0);

	if (tp->pcierr_recovery || !netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		rtnl_unlock();
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		/* Clear this flag so that tg3_reset_task_cancel() will not
		 * call cancel_work_sync() and wait forever.
		 */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		dev_close(tp->dev);
		goto out;
	}

	tg3_netif_start(tp);
	tg3_full_unlock(tp);
	tg3_phy_start(tp);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
	rtnl_unlock();
}

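/* Request the IRQ for a single vector.  Multi-vector configurations
 * get a per-vector name suffix (-txrx-N, -tx-N or -rx-N) that reflects
 * which rings the vector services; MSI/MSI-X vectors are requested
 * exclusive while legacy INTx must be shared.
 */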
11215 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11216 {
11217         irq_handler_t fn;
11218         unsigned long flags;
11219         char *name;
11220         struct tg3_napi *tnapi = &tp->napi[irq_num];
11221
11222         if (tp->irq_cnt == 1)
11223                 name = tp->dev->name;
11224         else {
11225                 name = &tnapi->irq_lbl[0];
11226                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11227                         snprintf(name, IFNAMSIZ,
11228                                  "%s-txrx-%d", tp->dev->name, irq_num);
11229                 else if (tnapi->tx_buffers)
11230                         snprintf(name, IFNAMSIZ,
11231                                  "%s-tx-%d", tp->dev->name, irq_num);
11232                 else if (tnapi->rx_rcb)
11233                         snprintf(name, IFNAMSIZ,
11234                                  "%s-rx-%d", tp->dev->name, irq_num);
11235                 else
11236                         snprintf(name, IFNAMSIZ,
11237                                  "%s-%d", tp->dev->name, irq_num);
11238                 name[IFNAMSIZ-1] = 0;
11239         }
11240
11241         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11242                 fn = tg3_msi;
11243                 if (tg3_flag(tp, 1SHOT_MSI))
11244                         fn = tg3_msi_1shot;
11245                 flags = 0;
11246         } else {
11247                 fn = tg3_interrupt;
11248                 if (tg3_flag(tp, TAGGED_STATUS))
11249                         fn = tg3_interrupt_tagged;
11250                 flags = IRQF_SHARED;
11251         }
11252
11253         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11254 }
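/* With MSI-X multiqueue enabled the names above are what shows up in
 * /proc/interrupts; e.g. an "eth0" with three vectors would typically
 * register "eth0-0" (vector 0, link and error events only),
 * "eth0-txrx-1" and "eth0-rx-2", depending on which rings each vector
 * services.
 */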
11255
11256 static int tg3_test_interrupt(struct tg3 *tp)
11257 {
11258         struct tg3_napi *tnapi = &tp->napi[0];
11259         struct net_device *dev = tp->dev;
11260         int err, i, intr_ok = 0;
11261         u32 val;
11262
11263         if (!netif_running(dev))
11264                 return -ENODEV;
11265
11266         tg3_disable_ints(tp);
11267
11268         free_irq(tnapi->irq_vec, tnapi);
11269
11270         /*
11271          * Turn off MSI one-shot mode.  Otherwise this test has no way
11272          * to observe whether the interrupt was delivered.
11273          */
11274         if (tg3_flag(tp, 57765_PLUS)) {
11275                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11276                 tw32(MSGINT_MODE, val);
11277         }
11278
11279         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11280                           IRQF_SHARED, dev->name, tnapi);
11281         if (err)
11282                 return err;
11283
11284         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11285         tg3_enable_ints(tp);
11286
11287         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11288                tnapi->coal_now);
11289
11290         for (i = 0; i < 5; i++) {
11291                 u32 int_mbox, misc_host_ctrl;
11292
11293                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11294                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11295
11296                 if ((int_mbox != 0) ||
11297                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11298                         intr_ok = 1;
11299                         break;
11300                 }
11301
11302                 if (tg3_flag(tp, 57765_PLUS) &&
11303                     tnapi->hw_status->status_tag != tnapi->last_tag)
11304                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11305
11306                 msleep(10);
11307         }
11308
11309         tg3_disable_ints(tp);
11310
11311         free_irq(tnapi->irq_vec, tnapi);
11312
11313         err = tg3_request_irq(tp, 0);
11314
11315         if (err)
11316                 return err;
11317
11318         if (intr_ok) {
11319                 /* Reenable MSI one shot mode. */
11320                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11321                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11322                         tw32(MSGINT_MODE, val);
11323                 }
11324                 return 0;
11325         }
11326
11327         return -EIO;
11328 }
11329
11330 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and
11331  * INTx mode is successfully restored.
11332  */
11333 static int tg3_test_msi(struct tg3 *tp)
11334 {
11335         int err;
11336         u16 pci_cmd;
11337
11338         if (!tg3_flag(tp, USING_MSI))
11339                 return 0;
11340
11341         /* Turn off SERR reporting in case MSI terminates with Master
11342          * Abort.
11343          */
11344         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11345         pci_write_config_word(tp->pdev, PCI_COMMAND,
11346                               pci_cmd & ~PCI_COMMAND_SERR);
11347
11348         err = tg3_test_interrupt(tp);
11349
11350         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11351
11352         if (!err)
11353                 return 0;
11354
11355         /* other failures */
11356         if (err != -EIO)
11357                 return err;
11358
11359         /* MSI test failed, go back to INTx mode */
11360         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11361                     "to INTx mode. Please report this failure to the PCI "
11362                     "maintainer and include system chipset information\n");
11363
11364         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11365
11366         pci_disable_msi(tp->pdev);
11367
11368         tg3_flag_clear(tp, USING_MSI);
11369         tp->napi[0].irq_vec = tp->pdev->irq;
11370
11371         err = tg3_request_irq(tp, 0);
11372         if (err)
11373                 return err;
11374
11375         /* Need to reset the chip because the MSI cycle may have terminated
11376          * with Master Abort.
11377          */
11378         tg3_full_lock(tp, 1);
11379
11380         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11381         err = tg3_init_hw(tp, true);
11382
11383         tg3_full_unlock(tp);
11384
11385         if (err)
11386                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11387
11388         return err;
11389 }
11390
11391 static int tg3_request_firmware(struct tg3 *tp)
11392 {
11393         const struct tg3_firmware_hdr *fw_hdr;
11394
11395         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11396                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11397                            tp->fw_needed);
11398                 return -ENOENT;
11399         }
11400
11401         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11402
11403         /* Firmware blob starts with version numbers, followed by
11404          * start address and _full_ length including BSS sections
11405          * (which must be at least as long as the actual data, of course).
11406          */
11407
11408         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11409         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11410                 netdev_err(tp->dev, "bogus length %u in \"%s\"\n",
11411                            tp->fw_len, tp->fw_needed);
11412                 release_firmware(tp->fw);
11413                 tp->fw = NULL;
11414                 return -EINVAL;
11415         }
11416
11417         /* We no longer need firmware; we have it. */
11418         tp->fw_needed = NULL;
11419         return 0;
11420 }
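/* Sketch of the blob layout assumed above; struct tg3_firmware_hdr
 * (see tg3.h) is three big-endian words ahead of the payload:
 *
 *        struct tg3_firmware_hdr {
 *                __be32 version;
 *                __be32 base_addr;
 *                __be32 len;          (text + data + BSS, in bytes)
 *        };
 *
 * For a blob of tp->fw->size bytes, the loadable payload is
 * tp->fw->size - TG3_FW_HDR_LEN bytes; fw_hdr->len may exceed that by
 * the size of a BSS region that is only zeroed on the device.
 */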
11421
11422 static u32 tg3_irq_count(struct tg3 *tp)
11423 {
11424         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11425
11426         if (irq_cnt > 1) {
11427                 /* We want as many rx rings enabled as there are cpus.
11428                  * In multiqueue MSI-X mode, the first MSI-X vector
11429                  * only deals with link interrupts, etc, so we add
11430                  * one to the number of vectors we are requesting.
11431                  */
11432                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11433         }
11434
11435         return irq_cnt;
11436 }
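/* Worked example: with rxq_cnt = 4, txq_cnt = 1 and irq_max = 5 this
 * returns min(4 + 1, 5) = 5, i.e. four ring vectors plus the dedicated
 * vector 0 for link and other miscellaneous interrupts.
 */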
11437
11438 static bool tg3_enable_msix(struct tg3 *tp)
11439 {
11440         int i, rc;
11441         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11442
11443         tp->txq_cnt = tp->txq_req;
11444         tp->rxq_cnt = tp->rxq_req;
11445         if (!tp->rxq_cnt)
11446                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11447         if (tp->rxq_cnt > tp->rxq_max)
11448                 tp->rxq_cnt = tp->rxq_max;
11449
11450         /* Disable multiple TX rings by default.  Simple round-robin hardware
11451          * scheduling of the TX rings can cause starvation of rings with
11452          * small packets when other rings have TSO or jumbo packets.
11453          */
11454         if (!tp->txq_req)
11455                 tp->txq_cnt = 1;
11456
11457         tp->irq_cnt = tg3_irq_count(tp);
11458
11459         for (i = 0; i < tp->irq_max; i++) {
11460                 msix_ent[i].entry  = i;
11461                 msix_ent[i].vector = 0;
11462         }
11463
11464         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11465         if (rc < 0) {
11466                 return false;
11467         } else if (rc < tp->irq_cnt) {
11468                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11469                               tp->irq_cnt, rc);
11470                 tp->irq_cnt = rc;
11471                 tp->rxq_cnt = max(rc - 1, 1);
11472                 if (tp->txq_cnt)
11473                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11474         }
11475
11476         for (i = 0; i < tp->irq_max; i++)
11477                 tp->napi[i].irq_vec = msix_ent[i].vector;
11478
11479         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11480                 pci_disable_msix(tp->pdev);
11481                 return false;
11482         }
11483
11484         if (tp->irq_cnt == 1)
11485                 return true;
11486
11487         tg3_flag_set(tp, ENABLE_RSS);
11488
11489         if (tp->txq_cnt > 1)
11490                 tg3_flag_set(tp, ENABLE_TSS);
11491
11492         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11493
11494         return true;
11495 }
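/* Example of the shortfall path: if five vectors are requested but
 * pci_enable_msix_range() grants only three, irq_cnt drops to 3 and
 * rxq_cnt to max(3 - 1, 1) = 2, again keeping vector 0 for link
 * interrupts only.
 */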
11496
11497 static void tg3_ints_init(struct tg3 *tp)
11498 {
11499         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11500             !tg3_flag(tp, TAGGED_STATUS)) {
11501                 /* All MSI supporting chips should support tagged
11502                  * status.  Warn and fall back to INTx if not.
11503                  */
11504                 netdev_warn(tp->dev,
11505                             "MSI without TAGGED_STATUS? Not using MSI\n");
11506                 goto defcfg;
11507         }
11508
11509         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11510                 tg3_flag_set(tp, USING_MSIX);
11511         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11512                 tg3_flag_set(tp, USING_MSI);
11513
11514         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11515                 u32 msi_mode = tr32(MSGINT_MODE);
11516                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11517                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11518                 if (!tg3_flag(tp, 1SHOT_MSI))
11519                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11520                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11521         }
11522 defcfg:
11523         if (!tg3_flag(tp, USING_MSIX)) {
11524                 tp->irq_cnt = 1;
11525                 tp->napi[0].irq_vec = tp->pdev->irq;
11526         }
11527
11528         if (tp->irq_cnt == 1) {
11529                 tp->txq_cnt = 1;
11530                 tp->rxq_cnt = 1;
11531                 netif_set_real_num_tx_queues(tp->dev, 1);
11532                 netif_set_real_num_rx_queues(tp->dev, 1);
11533         }
11534 }
11535
11536 static void tg3_ints_fini(struct tg3 *tp)
11537 {
11538         if (tg3_flag(tp, USING_MSIX))
11539                 pci_disable_msix(tp->pdev);
11540         else if (tg3_flag(tp, USING_MSI))
11541                 pci_disable_msi(tp->pdev);
11542         tg3_flag_clear(tp, USING_MSI);
11543         tg3_flag_clear(tp, USING_MSIX);
11544         tg3_flag_clear(tp, ENABLE_RSS);
11545         tg3_flag_clear(tp, ENABLE_TSS);
11546 }
11547
11548 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11549                      bool init)
11550 {
11551         struct net_device *dev = tp->dev;
11552         int i, err;
11553
11554         /*
11555          * Setup interrupts first so we know how
11556          * many NAPI resources to allocate
11557          */
11558         tg3_ints_init(tp);
11559
11560         tg3_rss_check_indir_tbl(tp);
11561
11562         /* The placement of this call is tied
11563          * to the setup and use of Host TX descriptors.
11564          */
11565         err = tg3_alloc_consistent(tp);
11566         if (err)
11567                 goto out_ints_fini;
11568
11569         tg3_napi_init(tp);
11570
11571         tg3_napi_enable(tp);
11572
11573         for (i = 0; i < tp->irq_cnt; i++) {
11574                 err = tg3_request_irq(tp, i);
11575                 if (err) {
11576                         for (i--; i >= 0; i--) {
11577                                 struct tg3_napi *tnapi = &tp->napi[i];
11578
11579                                 free_irq(tnapi->irq_vec, tnapi);
11580                         }
11581                         goto out_napi_fini;
11582                 }
11583         }
11584
11585         tg3_full_lock(tp, 0);
11586
11587         if (init)
11588                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11589
11590         err = tg3_init_hw(tp, reset_phy);
11591         if (err) {
11592                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11593                 tg3_free_rings(tp);
11594         }
11595
11596         tg3_full_unlock(tp);
11597
11598         if (err)
11599                 goto out_free_irq;
11600
11601         if (test_irq && tg3_flag(tp, USING_MSI)) {
11602                 err = tg3_test_msi(tp);
11603
11604                 if (err) {
11605                         tg3_full_lock(tp, 0);
11606                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11607                         tg3_free_rings(tp);
11608                         tg3_full_unlock(tp);
11609
11610                         goto out_napi_fini;
11611                 }
11612
11613                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11614                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11615
11616                         tw32(PCIE_TRANSACTION_CFG,
11617                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11618                 }
11619         }
11620
11621         tg3_phy_start(tp);
11622
11623         tg3_hwmon_open(tp);
11624
11625         tg3_full_lock(tp, 0);
11626
11627         tg3_timer_start(tp);
11628         tg3_flag_set(tp, INIT_COMPLETE);
11629         tg3_enable_ints(tp);
11630
11631         tg3_ptp_resume(tp);
11632
11633         tg3_full_unlock(tp);
11634
11635         netif_tx_start_all_queues(dev);
11636
11637         /*
11638          * Reset the loopback feature if it was turned on while the device
11639          * was down; make sure that it is installed properly now.
11640          */
11641         if (dev->features & NETIF_F_LOOPBACK)
11642                 tg3_set_loopback(dev, dev->features);
11643
11644         return 0;
11645
11646 out_free_irq:
11647         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11648                 struct tg3_napi *tnapi = &tp->napi[i];
11649                 free_irq(tnapi->irq_vec, tnapi);
11650         }
11651
11652 out_napi_fini:
11653         tg3_napi_disable(tp);
11654         tg3_napi_fini(tp);
11655         tg3_free_consistent(tp);
11656
11657 out_ints_fini:
11658         tg3_ints_fini(tp);
11659
11660         return err;
11661 }
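/* The error labels above unwind strictly in reverse order of setup:
 * IRQs are freed first, then NAPI is disabled and torn down together
 * with the DMA-consistent memory, and the interrupt vectors are
 * released last.
 */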
11662
11663 static void tg3_stop(struct tg3 *tp)
11664 {
11665         int i;
11666
11667         tg3_reset_task_cancel(tp);
11668         tg3_netif_stop(tp);
11669
11670         tg3_timer_stop(tp);
11671
11672         tg3_hwmon_close(tp);
11673
11674         tg3_phy_stop(tp);
11675
11676         tg3_full_lock(tp, 1);
11677
11678         tg3_disable_ints(tp);
11679
11680         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11681         tg3_free_rings(tp);
11682         tg3_flag_clear(tp, INIT_COMPLETE);
11683
11684         tg3_full_unlock(tp);
11685
11686         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11687                 struct tg3_napi *tnapi = &tp->napi[i];
11688                 free_irq(tnapi->irq_vec, tnapi);
11689         }
11690
11691         tg3_ints_fini(tp);
11692
11693         tg3_napi_fini(tp);
11694
11695         tg3_free_consistent(tp);
11696 }
11697
11698 static int tg3_open(struct net_device *dev)
11699 {
11700         struct tg3 *tp = netdev_priv(dev);
11701         int err;
11702
11703         if (tp->pcierr_recovery) {
11704                 netdev_err(dev, "Failed to open device. PCI error recovery "
11705                            "in progress\n");
11706                 return -EAGAIN;
11707         }
11708
11709         if (tp->fw_needed) {
11710                 err = tg3_request_firmware(tp);
11711                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11712                         if (err) {
11713                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11714                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11715                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11716                                 netdev_warn(tp->dev, "EEE capability restored\n");
11717                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11718                         }
11719                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11720                         if (err)
11721                                 return err;
11722                 } else if (err) {
11723                         netdev_warn(tp->dev, "TSO capability disabled\n");
11724                         tg3_flag_clear(tp, TSO_CAPABLE);
11725                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11726                         netdev_notice(tp->dev, "TSO capability restored\n");
11727                         tg3_flag_set(tp, TSO_CAPABLE);
11728                 }
11729         }
11730
11731         tg3_carrier_off(tp);
11732
11733         err = tg3_power_up(tp);
11734         if (err)
11735                 return err;
11736
11737         tg3_full_lock(tp, 0);
11738
11739         tg3_disable_ints(tp);
11740         tg3_flag_clear(tp, INIT_COMPLETE);
11741
11742         tg3_full_unlock(tp);
11743
11744         err = tg3_start(tp,
11745                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11746                         true, true);
11747         if (err) {
11748                 tg3_frob_aux_power(tp, false);
11749                 pci_set_power_state(tp->pdev, PCI_D3hot);
11750         }
11751
11752         return err;
11753 }
11754
11755 static int tg3_close(struct net_device *dev)
11756 {
11757         struct tg3 *tp = netdev_priv(dev);
11758
11759         if (tp->pcierr_recovery) {
11760                 netdev_err(dev, "Failed to close device. PCI error recovery "
11761                            "in progress\n");
11762                 return -EAGAIN;
11763         }
11764
11765         tg3_stop(tp);
11766
11767         if (pci_device_is_present(tp->pdev)) {
11768                 tg3_power_down_prepare(tp);
11769
11770                 tg3_carrier_off(tp);
11771         }
11772         return 0;
11773 }
11774
11775 static inline u64 get_stat64(tg3_stat64_t *val)
11776 {
11777         return ((u64)val->high << 32) | ((u64)val->low);
11778 }
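/* e.g. a hardware counter holding high = 0x1 and low = 0x2 is returned
 * as 0x100000002.
 */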
11779
11780 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11781 {
11782         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11783
11784         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11785             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11786              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11787                 u32 val;
11788
11789                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11790                         tg3_writephy(tp, MII_TG3_TEST1,
11791                                      val | MII_TG3_TEST1_CRC_EN);
11792                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11793                 } else
11794                         val = 0;
11795
11796                 tp->phy_crc_errors += val;
11797
11798                 return tp->phy_crc_errors;
11799         }
11800
11801         return get_stat64(&hw_stats->rx_fcs_errors);
11802 }
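/* Note on the 5700/5701 copper path above: the count comes from the
 * PHY's own receive-error counter (enabled via MII_TG3_TEST1_CRC_EN)
 * and is accumulated into tp->phy_crc_errors, which implies the MII
 * counter clears when read; all other chips report the MAC's
 * rx_fcs_errors statistic directly.
 */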
11803
11804 #define ESTAT_ADD(member) \
11805         estats->member =        old_estats->member + \
11806                                 get_stat64(&hw_stats->member)
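/* e.g. ESTAT_ADD(rx_octets) expands to
 *
 *        estats->rx_octets = old_estats->rx_octets +
 *                            get_stat64(&hw_stats->rx_octets);
 *
 * so each ethtool stat is the running total saved across the last chip
 * reset plus the current hardware counter.
 */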
11807
11808 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11809 {
11810         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11811         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11812
11813         ESTAT_ADD(rx_octets);
11814         ESTAT_ADD(rx_fragments);
11815         ESTAT_ADD(rx_ucast_packets);
11816         ESTAT_ADD(rx_mcast_packets);
11817         ESTAT_ADD(rx_bcast_packets);
11818         ESTAT_ADD(rx_fcs_errors);
11819         ESTAT_ADD(rx_align_errors);
11820         ESTAT_ADD(rx_xon_pause_rcvd);
11821         ESTAT_ADD(rx_xoff_pause_rcvd);
11822         ESTAT_ADD(rx_mac_ctrl_rcvd);
11823         ESTAT_ADD(rx_xoff_entered);
11824         ESTAT_ADD(rx_frame_too_long_errors);
11825         ESTAT_ADD(rx_jabbers);
11826         ESTAT_ADD(rx_undersize_packets);
11827         ESTAT_ADD(rx_in_length_errors);
11828         ESTAT_ADD(rx_out_length_errors);
11829         ESTAT_ADD(rx_64_or_less_octet_packets);
11830         ESTAT_ADD(rx_65_to_127_octet_packets);
11831         ESTAT_ADD(rx_128_to_255_octet_packets);
11832         ESTAT_ADD(rx_256_to_511_octet_packets);
11833         ESTAT_ADD(rx_512_to_1023_octet_packets);
11834         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11835         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11836         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11837         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11838         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11839
11840         ESTAT_ADD(tx_octets);
11841         ESTAT_ADD(tx_collisions);
11842         ESTAT_ADD(tx_xon_sent);
11843         ESTAT_ADD(tx_xoff_sent);
11844         ESTAT_ADD(tx_flow_control);
11845         ESTAT_ADD(tx_mac_errors);
11846         ESTAT_ADD(tx_single_collisions);
11847         ESTAT_ADD(tx_mult_collisions);
11848         ESTAT_ADD(tx_deferred);
11849         ESTAT_ADD(tx_excessive_collisions);
11850         ESTAT_ADD(tx_late_collisions);
11851         ESTAT_ADD(tx_collide_2times);
11852         ESTAT_ADD(tx_collide_3times);
11853         ESTAT_ADD(tx_collide_4times);
11854         ESTAT_ADD(tx_collide_5times);
11855         ESTAT_ADD(tx_collide_6times);
11856         ESTAT_ADD(tx_collide_7times);
11857         ESTAT_ADD(tx_collide_8times);
11858         ESTAT_ADD(tx_collide_9times);
11859         ESTAT_ADD(tx_collide_10times);
11860         ESTAT_ADD(tx_collide_11times);
11861         ESTAT_ADD(tx_collide_12times);
11862         ESTAT_ADD(tx_collide_13times);
11863         ESTAT_ADD(tx_collide_14times);
11864         ESTAT_ADD(tx_collide_15times);
11865         ESTAT_ADD(tx_ucast_packets);
11866         ESTAT_ADD(tx_mcast_packets);
11867         ESTAT_ADD(tx_bcast_packets);
11868         ESTAT_ADD(tx_carrier_sense_errors);
11869         ESTAT_ADD(tx_discards);
11870         ESTAT_ADD(tx_errors);
11871
11872         ESTAT_ADD(dma_writeq_full);
11873         ESTAT_ADD(dma_write_prioq_full);
11874         ESTAT_ADD(rxbds_empty);
11875         ESTAT_ADD(rx_discards);
11876         ESTAT_ADD(rx_errors);
11877         ESTAT_ADD(rx_threshold_hit);
11878
11879         ESTAT_ADD(dma_readq_full);
11880         ESTAT_ADD(dma_read_prioq_full);
11881         ESTAT_ADD(tx_comp_queue_full);
11882
11883         ESTAT_ADD(ring_set_send_prod_index);
11884         ESTAT_ADD(ring_status_update);
11885         ESTAT_ADD(nic_irqs);
11886         ESTAT_ADD(nic_avoided_irqs);
11887         ESTAT_ADD(nic_tx_threshold_hit);
11888
11889         ESTAT_ADD(mbuf_lwm_thresh_hit);
11890 }
11891
11892 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11893 {
11894         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11895         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11896
11897         stats->rx_packets = old_stats->rx_packets +
11898                 get_stat64(&hw_stats->rx_ucast_packets) +
11899                 get_stat64(&hw_stats->rx_mcast_packets) +
11900                 get_stat64(&hw_stats->rx_bcast_packets);
11901
11902         stats->tx_packets = old_stats->tx_packets +
11903                 get_stat64(&hw_stats->tx_ucast_packets) +
11904                 get_stat64(&hw_stats->tx_mcast_packets) +
11905                 get_stat64(&hw_stats->tx_bcast_packets);
11906
11907         stats->rx_bytes = old_stats->rx_bytes +
11908                 get_stat64(&hw_stats->rx_octets);
11909         stats->tx_bytes = old_stats->tx_bytes +
11910                 get_stat64(&hw_stats->tx_octets);
11911
11912         stats->rx_errors = old_stats->rx_errors +
11913                 get_stat64(&hw_stats->rx_errors);
11914         stats->tx_errors = old_stats->tx_errors +
11915                 get_stat64(&hw_stats->tx_errors) +
11916                 get_stat64(&hw_stats->tx_mac_errors) +
11917                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11918                 get_stat64(&hw_stats->tx_discards);
11919
11920         stats->multicast = old_stats->multicast +
11921                 get_stat64(&hw_stats->rx_mcast_packets);
11922         stats->collisions = old_stats->collisions +
11923                 get_stat64(&hw_stats->tx_collisions);
11924
11925         stats->rx_length_errors = old_stats->rx_length_errors +
11926                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11927                 get_stat64(&hw_stats->rx_undersize_packets);
11928
11929         stats->rx_frame_errors = old_stats->rx_frame_errors +
11930                 get_stat64(&hw_stats->rx_align_errors);
11931         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11932                 get_stat64(&hw_stats->tx_discards);
11933         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11934                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11935
11936         stats->rx_crc_errors = old_stats->rx_crc_errors +
11937                 tg3_calc_crc_errors(tp);
11938
11939         stats->rx_missed_errors = old_stats->rx_missed_errors +
11940                 get_stat64(&hw_stats->rx_discards);
11941
11942         stats->rx_dropped = tp->rx_dropped;
11943         stats->tx_dropped = tp->tx_dropped;
11944 }
11945
11946 static int tg3_get_regs_len(struct net_device *dev)
11947 {
11948         return TG3_REG_BLK_SIZE;
11949 }
11950
11951 static void tg3_get_regs(struct net_device *dev,
11952                 struct ethtool_regs *regs, void *_p)
11953 {
11954         struct tg3 *tp = netdev_priv(dev);
11955
11956         regs->version = 0;
11957
11958         memset(_p, 0, TG3_REG_BLK_SIZE);
11959
11960         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11961                 return;
11962
11963         tg3_full_lock(tp, 0);
11964
11965         tg3_dump_legacy_regs(tp, (u32 *)_p);
11966
11967         tg3_full_unlock(tp);
11968 }
11969
11970 static int tg3_get_eeprom_len(struct net_device *dev)
11971 {
11972         struct tg3 *tp = netdev_priv(dev);
11973
11974         return tp->nvram_size;
11975 }
11976
11977 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11978 {
11979         struct tg3 *tp = netdev_priv(dev);
11980         int ret, cpmu_restore = 0;
11981         u8  *pd;
11982         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11983         __be32 val;
11984
11985         if (tg3_flag(tp, NO_NVRAM))
11986                 return -EINVAL;
11987
11988         offset = eeprom->offset;
11989         len = eeprom->len;
11990         eeprom->len = 0;
11991
11992         eeprom->magic = TG3_EEPROM_MAGIC;
11993
11994         /* Override clock, link aware and link idle modes */
11995         if (tg3_flag(tp, CPMU_PRESENT)) {
11996                 cpmu_val = tr32(TG3_CPMU_CTRL);
11997                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11998                                 CPMU_CTRL_LINK_IDLE_MODE)) {
11999                         tw32(TG3_CPMU_CTRL, cpmu_val &
12000                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
12001                                              CPMU_CTRL_LINK_IDLE_MODE));
12002                         cpmu_restore = 1;
12003                 }
12004         }
12005         tg3_override_clk(tp);
12006
12007         if (offset & 3) {
12008                 /* adjustments to start on required 4 byte boundary */
12009                 b_offset = offset & 3;
12010                 b_count = 4 - b_offset;
12011                 if (b_count > len) {
12012                         /* i.e. offset=1 len=2 */
12013                         b_count = len;
12014                 }
12015                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12016                 if (ret)
12017                         goto eeprom_done;
12018                 memcpy(data, ((char *)&val) + b_offset, b_count);
12019                 len -= b_count;
12020                 offset += b_count;
12021                 eeprom->len += b_count;
12022         }
12023
12024         /* read bytes up to the last 4 byte boundary */
12025         pd = &data[eeprom->len];
12026         for (i = 0; i < (len - (len & 3)); i += 4) {
12027                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12028                 if (ret) {
12029                         if (i)
12030                                 i -= 4;
12031                         eeprom->len += i;
12032                         goto eeprom_done;
12033                 }
12034                 memcpy(pd + i, &val, 4);
12035                 if (need_resched()) {
12036                         if (signal_pending(current)) {
12037                                 eeprom->len += i;
12038                                 ret = -EINTR;
12039                                 goto eeprom_done;
12040                         }
12041                         cond_resched();
12042                 }
12043         }
12044         eeprom->len += i;
12045
12046         if (len & 3) {
12047                 /* read last bytes not ending on 4 byte boundary */
12048                 pd = &data[eeprom->len];
12049                 b_count = len & 3;
12050                 b_offset = offset + len - b_count;
12051                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12052                 if (ret)
12053                         goto eeprom_done;
12054                 memcpy(pd, &val, b_count);
12055                 eeprom->len += b_count;
12056         }
12057         ret = 0;
12058
12059 eeprom_done:
12060         /* Restore clock, link aware and link idle modes */
12061         tg3_restore_clk(tp);
12062         if (cpmu_restore)
12063                 tw32(TG3_CPMU_CTRL, cpmu_val);
12064
12065         return ret;
12066 }
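/* Worked example of the alignment handling above, for offset = 5 and
 * len = 10 (bytes 5..14): the head reads the word at offset 4 and
 * copies bytes 5..7 (b_offset = 1, b_count = 3), the middle loop reads
 * one full word at offset 8, and the tail reads the word at offset 12
 * and copies its first 3 bytes.
 */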
12067
12068 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12069 {
12070         struct tg3 *tp = netdev_priv(dev);
12071         int ret;
12072         u32 offset, len, b_offset, odd_len;
12073         u8 *buf;
12074         __be32 start = 0, end;
12075
12076         if (tg3_flag(tp, NO_NVRAM) ||
12077             eeprom->magic != TG3_EEPROM_MAGIC)
12078                 return -EINVAL;
12079
12080         offset = eeprom->offset;
12081         len = eeprom->len;
12082
12083         if ((b_offset = (offset & 3))) {
12084                 /* adjustments to start on required 4 byte boundary */
12085                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12086                 if (ret)
12087                         return ret;
12088                 len += b_offset;
12089                 offset &= ~3;
12090                 if (len < 4)
12091                         len = 4;
12092         }
12093
12094         odd_len = 0;
12095         if (len & 3) {
12096                 /* adjustments to end on required 4 byte boundary */
12097                 odd_len = 1;
12098                 len = (len + 3) & ~3;
12099                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12100                 if (ret)
12101                         return ret;
12102         }
12103
12104         buf = data;
12105         if (b_offset || odd_len) {
12106                 buf = kmalloc(len, GFP_KERNEL);
12107                 if (!buf)
12108                         return -ENOMEM;
12109                 if (b_offset)
12110                         memcpy(buf, &start, 4);
12111                 if (odd_len)
12112                         memcpy(buf+len-4, &end, 4);
12113                 memcpy(buf + b_offset, data, eeprom->len);
12114         }
12115
12116         ret = tg3_nvram_write_block(tp, offset, len, buf);
12117
12118         if (buf != data)
12119                 kfree(buf);
12120
12121         return ret;
12122 }
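/* Worked example of the read-modify-write above, for offset = 6 and
 * len = 5 (bytes 6..10): the word at offset 4 preserves bytes 4..5,
 * len grows to 7 and is rounded up to 8, the word at offset 8 preserves
 * byte 11, and a single aligned 8-byte block covering offsets 4..11 is
 * written back.
 */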
12123
12124 static int tg3_get_link_ksettings(struct net_device *dev,
12125                                   struct ethtool_link_ksettings *cmd)
12126 {
12127         struct tg3 *tp = netdev_priv(dev);
12128         u32 supported, advertising;
12129
12130         if (tg3_flag(tp, USE_PHYLIB)) {
12131                 struct phy_device *phydev;
12132                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12133                         return -EAGAIN;
12134                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12135                 phy_ethtool_ksettings_get(phydev, cmd);
12136
12137                 return 0;
12138         }
12139
12140         supported = SUPPORTED_Autoneg;
12141
12142         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12143                 supported |= (SUPPORTED_1000baseT_Half |
12144                               SUPPORTED_1000baseT_Full);
12145
12146         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12147                 supported |= (SUPPORTED_100baseT_Half |
12148                               SUPPORTED_100baseT_Full |
12149                               SUPPORTED_10baseT_Half |
12150                               SUPPORTED_10baseT_Full |
12151                               SUPPORTED_TP);
12152                 cmd->base.port = PORT_TP;
12153         } else {
12154                 supported |= SUPPORTED_FIBRE;
12155                 cmd->base.port = PORT_FIBRE;
12156         }
12157         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12158                                                 supported);
12159
12160         advertising = tp->link_config.advertising;
12161         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12162                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12163                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12164                                 advertising |= ADVERTISED_Pause;
12165                         } else {
12166                                 advertising |= ADVERTISED_Pause |
12167                                         ADVERTISED_Asym_Pause;
12168                         }
12169                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12170                         advertising |= ADVERTISED_Asym_Pause;
12171                 }
12172         }
12173         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12174                                                 advertising);
12175
12176         if (netif_running(dev) && tp->link_up) {
12177                 cmd->base.speed = tp->link_config.active_speed;
12178                 cmd->base.duplex = tp->link_config.active_duplex;
12179                 ethtool_convert_legacy_u32_to_link_mode(
12180                         cmd->link_modes.lp_advertising,
12181                         tp->link_config.rmt_adv);
12182
12183                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12184                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12185                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12186                         else
12187                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12188                 }
12189         } else {
12190                 cmd->base.speed = SPEED_UNKNOWN;
12191                 cmd->base.duplex = DUPLEX_UNKNOWN;
12192                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12193         }
12194         cmd->base.phy_address = tp->phy_addr;
12195         cmd->base.autoneg = tp->link_config.autoneg;
12196         return 0;
12197 }
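/* The pause advertisement mapping above follows the usual 802.3
 * convention: RX+TX flow control advertises Pause, RX-only advertises
 * Pause | Asym_Pause, and TX-only advertises Asym_Pause alone.
 */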
12198
12199 static int tg3_set_link_ksettings(struct net_device *dev,
12200                                   const struct ethtool_link_ksettings *cmd)
12201 {
12202         struct tg3 *tp = netdev_priv(dev);
12203         u32 speed = cmd->base.speed;
12204         u32 advertising;
12205
12206         if (tg3_flag(tp, USE_PHYLIB)) {
12207                 struct phy_device *phydev;
12208                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12209                         return -EAGAIN;
12210                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12211                 return phy_ethtool_ksettings_set(phydev, cmd);
12212         }
12213
12214         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12215             cmd->base.autoneg != AUTONEG_DISABLE)
12216                 return -EINVAL;
12217
12218         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12219             cmd->base.duplex != DUPLEX_FULL &&
12220             cmd->base.duplex != DUPLEX_HALF)
12221                 return -EINVAL;
12222
12223         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12224                                                 cmd->link_modes.advertising);
12225
12226         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12227                 u32 mask = ADVERTISED_Autoneg |
12228                            ADVERTISED_Pause |
12229                            ADVERTISED_Asym_Pause;
12230
12231                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12232                         mask |= ADVERTISED_1000baseT_Half |
12233                                 ADVERTISED_1000baseT_Full;
12234
12235                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12236                         mask |= ADVERTISED_100baseT_Half |
12237                                 ADVERTISED_100baseT_Full |
12238                                 ADVERTISED_10baseT_Half |
12239                                 ADVERTISED_10baseT_Full |
12240                                 ADVERTISED_TP;
12241                 else
12242                         mask |= ADVERTISED_FIBRE;
12243
12244                 if (advertising & ~mask)
12245                         return -EINVAL;
12246
12247                 mask &= (ADVERTISED_1000baseT_Half |
12248                          ADVERTISED_1000baseT_Full |
12249                          ADVERTISED_100baseT_Half |
12250                          ADVERTISED_100baseT_Full |
12251                          ADVERTISED_10baseT_Half |
12252                          ADVERTISED_10baseT_Full);
12253
12254                 advertising &= mask;
12255         } else {
12256                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12257                         if (speed != SPEED_1000)
12258                                 return -EINVAL;
12259
12260                         if (cmd->base.duplex != DUPLEX_FULL)
12261                                 return -EINVAL;
12262                 } else {
12263                         if (speed != SPEED_100 &&
12264                             speed != SPEED_10)
12265                                 return -EINVAL;
12266                 }
12267         }
12268
12269         tg3_full_lock(tp, 0);
12270
12271         tp->link_config.autoneg = cmd->base.autoneg;
12272         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12273                 tp->link_config.advertising = (advertising |
12274                                               ADVERTISED_Autoneg);
12275                 tp->link_config.speed = SPEED_UNKNOWN;
12276                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12277         } else {
12278                 tp->link_config.advertising = 0;
12279                 tp->link_config.speed = speed;
12280                 tp->link_config.duplex = cmd->base.duplex;
12281         }
12282
12283         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12284
12285         tg3_warn_mgmt_link_flap(tp);
12286
12287         if (netif_running(dev))
12288                 tg3_setup_phy(tp, true);
12289
12290         tg3_full_unlock(tp);
12291
12292         return 0;
12293 }
12294
12295 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12296 {
12297         struct tg3 *tp = netdev_priv(dev);
12298
12299         strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12300         strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12301         strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12302 }
12303
12304 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12305 {
12306         struct tg3 *tp = netdev_priv(dev);
12307
12308         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12309                 wol->supported = WAKE_MAGIC;
12310         else
12311                 wol->supported = 0;
12312         wol->wolopts = 0;
12313         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12314                 wol->wolopts = WAKE_MAGIC;
12315         memset(&wol->sopass, 0, sizeof(wol->sopass));
12316 }
12317
12318 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12319 {
12320         struct tg3 *tp = netdev_priv(dev);
12321         struct device *dp = &tp->pdev->dev;
12322
12323         if (wol->wolopts & ~WAKE_MAGIC)
12324                 return -EINVAL;
12325         if ((wol->wolopts & WAKE_MAGIC) &&
12326             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12327                 return -EINVAL;
12328
12329         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12330
12331         if (device_may_wakeup(dp))
12332                 tg3_flag_set(tp, WOL_ENABLE);
12333         else
12334                 tg3_flag_clear(tp, WOL_ENABLE);
12335
12336         return 0;
12337 }
12338
12339 static u32 tg3_get_msglevel(struct net_device *dev)
12340 {
12341         struct tg3 *tp = netdev_priv(dev);
12342         return tp->msg_enable;
12343 }
12344
12345 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12346 {
12347         struct tg3 *tp = netdev_priv(dev);
12348         tp->msg_enable = value;
12349 }
12350
12351 static int tg3_nway_reset(struct net_device *dev)
12352 {
12353         struct tg3 *tp = netdev_priv(dev);
12354         int r;
12355
12356         if (!netif_running(dev))
12357                 return -EAGAIN;
12358
12359         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12360                 return -EINVAL;
12361
12362         tg3_warn_mgmt_link_flap(tp);
12363
12364         if (tg3_flag(tp, USE_PHYLIB)) {
12365                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12366                         return -EAGAIN;
12367                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12368         } else {
12369                 u32 bmcr;
12370
12371                 spin_lock_bh(&tp->lock);
12372                 r = -EINVAL;
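                /* BMCR is deliberately read twice; the first read can
                 * return stale data on some PHYs, so only the second
                 * result is used.
                 */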
12373                 tg3_readphy(tp, MII_BMCR, &bmcr);
12374                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12375                     ((bmcr & BMCR_ANENABLE) ||
12376                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12377                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12378                                                    BMCR_ANENABLE);
12379                         r = 0;
12380                 }
12381                 spin_unlock_bh(&tp->lock);
12382         }
12383
12384         return r;
12385 }
12386
12387 static void tg3_get_ringparam(struct net_device *dev,
12388                               struct ethtool_ringparam *ering,
12389                               struct kernel_ethtool_ringparam *kernel_ering,
12390                               struct netlink_ext_ack *extack)
12391 {
12392         struct tg3 *tp = netdev_priv(dev);
12393
12394         ering->rx_max_pending = tp->rx_std_ring_mask;
12395         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12396                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12397         else
12398                 ering->rx_jumbo_max_pending = 0;
12399
12400         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12401
12402         ering->rx_pending = tp->rx_pending;
12403         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12404                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12405         else
12406                 ering->rx_jumbo_pending = 0;
12407
12408         ering->tx_pending = tp->napi[0].tx_pending;
12409 }
12410
12411 static int tg3_set_ringparam(struct net_device *dev,
12412                              struct ethtool_ringparam *ering,
12413                              struct kernel_ethtool_ringparam *kernel_ering,
12414                              struct netlink_ext_ack *extack)
12415 {
12416         struct tg3 *tp = netdev_priv(dev);
12417         int i, irq_sync = 0, err = 0;
12418         bool reset_phy = false;
12419
12420         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12421             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12422             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12423             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12424             (tg3_flag(tp, TSO_BUG) &&
12425              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12426                 return -EINVAL;
12427
12428         if (netif_running(dev)) {
12429                 tg3_phy_stop(tp);
12430                 tg3_netif_stop(tp);
12431                 irq_sync = 1;
12432         }
12433
12434         tg3_full_lock(tp, irq_sync);
12435
12436         tp->rx_pending = ering->rx_pending;
12437
12438         if (tg3_flag(tp, MAX_RXPEND_64) &&
12439             tp->rx_pending > 63)
12440                 tp->rx_pending = 63;
12441
12442         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12443                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12444
12445         for (i = 0; i < tp->irq_max; i++)
12446                 tp->napi[i].tx_pending = ering->tx_pending;
12447
12448         if (netif_running(dev)) {
12449                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12450                 /* Reset PHY to avoid PHY lock up */
12451                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12452                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
12453                     tg3_asic_rev(tp) == ASIC_REV_5720)
12454                         reset_phy = true;
12455
12456                 err = tg3_restart_hw(tp, reset_phy);
12457                 if (!err)
12458                         tg3_netif_start(tp);
12459         }
12460
12461         tg3_full_unlock(tp);
12462
12463         if (irq_sync && !err)
12464                 tg3_phy_start(tp);
12465
12466         return err;
12467 }
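/* The tx_pending floor above exists because one maximally fragmented
 * packet can occupy MAX_SKB_FRAGS + 1 descriptors (and more on TSO_BUG
 * chips, where the driver may segment TSO packets itself), so smaller
 * rings could never accept such a packet.  A typical resize such as
 * "ethtool -G eth0 rx 511 tx 255" ends up here.
 */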
12468
12469 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12470 {
12471         struct tg3 *tp = netdev_priv(dev);
12472
12473         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12474
12475         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12476                 epause->rx_pause = 1;
12477         else
12478                 epause->rx_pause = 0;
12479
12480         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12481                 epause->tx_pause = 1;
12482         else
12483                 epause->tx_pause = 0;
12484 }
12485
12486 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12487 {
12488         struct tg3 *tp = netdev_priv(dev);
12489         int err = 0;
12490         bool reset_phy = false;
12491
12492         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12493                 tg3_warn_mgmt_link_flap(tp);
12494
12495         if (tg3_flag(tp, USE_PHYLIB)) {
12496                 struct phy_device *phydev;
12497
12498                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12499
12500                 if (!phy_validate_pause(phydev, epause))
12501                         return -EINVAL;
12502
12503                 tp->link_config.flowctrl = 0;
12504                 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12505                 if (epause->rx_pause) {
12506                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12507
12508                         if (epause->tx_pause) {
12509                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12510                         }
12511                 } else if (epause->tx_pause) {
12512                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12513                 }
12514
12515                 if (epause->autoneg)
12516                         tg3_flag_set(tp, PAUSE_AUTONEG);
12517                 else
12518                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12519
12520                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12521                         if (phydev->autoneg) {
12522                                 /* phy_set_asym_pause() will
12523                                  * renegotiate the link to inform our
12524                                  * link partner of our flow control
12525                                  * settings, even if the flow control
12526                                  * is forced.  Let tg3_adjust_link()
12527                                  * do the final flow control setup.
12528                                  */
12529                                 return 0;
12530                         }
12531
12532                         if (!epause->autoneg)
12533                                 tg3_setup_flow_control(tp, 0, 0);
12534                 }
12535         } else {
12536                 int irq_sync = 0;
12537
12538                 if (netif_running(dev)) {
12539                         tg3_netif_stop(tp);
12540                         irq_sync = 1;
12541                 }
12542
12543                 tg3_full_lock(tp, irq_sync);
12544
12545                 if (epause->autoneg)
12546                         tg3_flag_set(tp, PAUSE_AUTONEG);
12547                 else
12548                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12549                 if (epause->rx_pause)
12550                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12551                 else
12552                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12553                 if (epause->tx_pause)
12554                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12555                 else
12556                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12557
12558                 if (netif_running(dev)) {
12559                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12560                         /* Reset PHY to avoid PHY lock up */
12561                         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12562                             tg3_asic_rev(tp) == ASIC_REV_5719 ||
12563                             tg3_asic_rev(tp) == ASIC_REV_5720)
12564                                 reset_phy = true;
12565
12566                         err = tg3_restart_hw(tp, reset_phy);
12567                         if (!err)
12568                                 tg3_netif_start(tp);
12569                 }
12570
12571                 tg3_full_unlock(tp);
12572         }
12573
12574         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12575
12576         return err;
12577 }
12578
12579 static int tg3_get_sset_count(struct net_device *dev, int sset)
12580 {
12581         switch (sset) {
12582         case ETH_SS_TEST:
12583                 return TG3_NUM_TEST;
12584         case ETH_SS_STATS:
12585                 return TG3_NUM_STATS;
12586         default:
12587                 return -EOPNOTSUPP;
12588         }
12589 }
12590
12591 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12592                          u32 *rules __always_unused)
12593 {
12594         struct tg3 *tp = netdev_priv(dev);
12595
12596         if (!tg3_flag(tp, SUPPORT_MSIX))
12597                 return -EOPNOTSUPP;
12598
12599         switch (info->cmd) {
12600         case ETHTOOL_GRXRINGS:
12601                 if (netif_running(tp->dev)) {
12602                         info->data = tp->rxq_cnt;
12603                 } else {
12604                         info->data = num_online_cpus();
12605                         if (info->data > TG3_RSS_MAX_NUM_QS)
12606                                 info->data = TG3_RSS_MAX_NUM_QS;
12607                 }
12608
12609                 return 0;
12610
12611         default:
12612                 return -EOPNOTSUPP;
12613         }
12614 }
12615
12616 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12617 {
12618         u32 size = 0;
12619         struct tg3 *tp = netdev_priv(dev);
12620
12621         if (tg3_flag(tp, SUPPORT_MSIX))
12622                 size = TG3_RSS_INDIR_TBL_SIZE;
12623
12624         return size;
12625 }
12626
12627 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12628 {
12629         struct tg3 *tp = netdev_priv(dev);
12630         int i;
12631
12632         if (hfunc)
12633                 *hfunc = ETH_RSS_HASH_TOP;
12634         if (!indir)
12635                 return 0;
12636
12637         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12638                 indir[i] = tp->rss_ind_tbl[i];
12639
12640         return 0;
12641 }
12642
12643 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12644                         const u8 hfunc)
12645 {
12646         struct tg3 *tp = netdev_priv(dev);
12647         size_t i;
12648
12649         /* Only the indirection table is supported here; reject any
12650          * request that supplies a hash key or changes the hash function.
12651          */
12652         if (key ||
12653             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12654                 return -EOPNOTSUPP;
12655
12656         if (!indir)
12657                 return 0;
12658
12659         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12660                 tp->rss_ind_tbl[i] = indir[i];
12661
12662         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12663                 return 0;
12664
12665         /* It is legal to write the indirection
12666          * table while the device is running.
12667          */
12668         tg3_full_lock(tp, 0);
12669         tg3_rss_write_indir_tbl(tp);
12670         tg3_full_unlock(tp);
12671
12672         return 0;
12673 }
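/* e.g. "ethtool -X eth0 equal 4" rewrites the table to spread traffic
 * evenly over the first four RX queues, taking effect immediately when
 * the device is up with RSS enabled.
 */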
12674
12675 static void tg3_get_channels(struct net_device *dev,
12676                              struct ethtool_channels *channel)
12677 {
12678         struct tg3 *tp = netdev_priv(dev);
12679         u32 deflt_qs = netif_get_num_default_rss_queues();
12680
12681         channel->max_rx = tp->rxq_max;
12682         channel->max_tx = tp->txq_max;
12683
12684         if (netif_running(dev)) {
12685                 channel->rx_count = tp->rxq_cnt;
12686                 channel->tx_count = tp->txq_cnt;
12687         } else {
12688                 if (tp->rxq_req)
12689                         channel->rx_count = tp->rxq_req;
12690                 else
12691                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12692
12693                 if (tp->txq_req)
12694                         channel->tx_count = tp->txq_req;
12695                 else
12696                         channel->tx_count = min(deflt_qs, tp->txq_max);
12697         }
12698 }
12699
12700 static int tg3_set_channels(struct net_device *dev,
12701                             struct ethtool_channels *channel)
12702 {
12703         struct tg3 *tp = netdev_priv(dev);
12704
12705         if (!tg3_flag(tp, SUPPORT_MSIX))
12706                 return -EOPNOTSUPP;
12707
12708         if (channel->rx_count > tp->rxq_max ||
12709             channel->tx_count > tp->txq_max)
12710                 return -EINVAL;
12711
12712         tp->rxq_req = channel->rx_count;
12713         tp->txq_req = channel->tx_count;
12714
12715         if (!netif_running(dev))
12716                 return 0;
12717
12718         tg3_stop(tp);
12719
12720         tg3_carrier_off(tp);
12721
12722         tg3_start(tp, true, false, false);
12723
12724         return 0;
12725 }
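/* Illustrative usage (not driver code): the channel handlers above back
 * "ethtool -l / -L".  Changing counts on a running interface bounces the
 * device through the tg3_stop()/tg3_start() pair, e.g.:
 *
 *	# ethtool -l eth0		query current and maximum queue counts
 *	# ethtool -L eth0 rx 4 tx 4	request 4 RX and 4 TX queues
 */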
12726
12727 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12728 {
12729         switch (stringset) {
12730         case ETH_SS_STATS:
12731                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12732                 break;
12733         case ETH_SS_TEST:
12734                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12735                 break;
12736         default:
12737                 WARN_ON(1);     /* unknown stringset - should never happen */
12738                 break;
12739         }
12740 }
12741
12742 static int tg3_set_phys_id(struct net_device *dev,
12743                             enum ethtool_phys_id_state state)
12744 {
12745         struct tg3 *tp = netdev_priv(dev);
12746
12747         switch (state) {
12748         case ETHTOOL_ID_ACTIVE:
12749                 return 1;       /* cycle on/off once per second */
12750
12751         case ETHTOOL_ID_ON:
12752                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12753                      LED_CTRL_1000MBPS_ON |
12754                      LED_CTRL_100MBPS_ON |
12755                      LED_CTRL_10MBPS_ON |
12756                      LED_CTRL_TRAFFIC_OVERRIDE |
12757                      LED_CTRL_TRAFFIC_BLINK |
12758                      LED_CTRL_TRAFFIC_LED);
12759                 break;
12760
12761         case ETHTOOL_ID_OFF:
12762                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12763                      LED_CTRL_TRAFFIC_OVERRIDE);
12764                 break;
12765
12766         case ETHTOOL_ID_INACTIVE:
12767                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12768                 break;
12769         }
12770
12771         return 0;
12772 }
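/* Illustrative usage (not driver code): returning 1 for ETHTOOL_ID_ACTIVE
 * asks the ethtool core to alternate ETHTOOL_ID_ON/ETHTOOL_ID_OFF once per
 * second, so "ethtool -p eth0 10" blinks the port LED for ten seconds and
 * ETHTOOL_ID_INACTIVE then restores tp->led_ctrl.
 */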
12773
12774 static void tg3_get_ethtool_stats(struct net_device *dev,
12775                                    struct ethtool_stats *estats, u64 *tmp_stats)
12776 {
12777         struct tg3 *tp = netdev_priv(dev);
12778
12779         if (tp->hw_stats)
12780                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12781         else
12782                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12783 }
12784
12785 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12786 {
12787         int i;
12788         __be32 *buf;
12789         u32 offset = 0, len = 0;
12790         u32 magic, val;
12791
12792         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12793                 return NULL;
12794
12795         if (magic == TG3_EEPROM_MAGIC) {
12796                 for (offset = TG3_NVM_DIR_START;
12797                      offset < TG3_NVM_DIR_END;
12798                      offset += TG3_NVM_DIRENT_SIZE) {
12799                         if (tg3_nvram_read(tp, offset, &val))
12800                                 return NULL;
12801
12802                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12803                             TG3_NVM_DIRTYPE_EXTVPD)
12804                                 break;
12805                 }
12806
12807                 if (offset != TG3_NVM_DIR_END) {
12808                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12809                         if (tg3_nvram_read(tp, offset + 4, &offset))
12810                                 return NULL;
12811
12812                         offset = tg3_nvram_logical_addr(tp, offset);
12813                 }
12814
12815                 if (!offset || !len) {
12816                         offset = TG3_NVM_VPD_OFF;
12817                         len = TG3_NVM_VPD_LEN;
12818                 }
12819
12820                 buf = kmalloc(len, GFP_KERNEL);
12821                 if (!buf)
12822                         return NULL;
12823
12824                 for (i = 0; i < len; i += 4) {
12825                         /* The data is in little-endian format in NVRAM.
12826                          * Use the big-endian read routines to preserve
12827                          * the byte order as it exists in NVRAM.
12828                          */
12829                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12830                                 goto error;
12831                 }
12832                 *vpdlen = len;
12833         } else {
12834                 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12835                 if (IS_ERR(buf))
12836                         return NULL;
12837         }
12838
12839         return buf;
12840
12841 error:
12842         kfree(buf);
12843         return NULL;
12844 }
12845
12846 #define NVRAM_TEST_SIZE 0x100
12847 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12848 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12849 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12850 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12851 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12852 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12853 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12854 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12855
12856 static int tg3_test_nvram(struct tg3 *tp)
12857 {
12858         u32 csum, magic;
12859         __be32 *buf;
12860         int i, j, k, err = 0, size;
12861         unsigned int len;
12862
12863         if (tg3_flag(tp, NO_NVRAM))
12864                 return 0;
12865
12866         if (tg3_nvram_read(tp, 0, &magic) != 0)
12867                 return -EIO;
12868
12869         if (magic == TG3_EEPROM_MAGIC)
12870                 size = NVRAM_TEST_SIZE;
12871         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12872                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12873                     TG3_EEPROM_SB_FORMAT_1) {
12874                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12875                         case TG3_EEPROM_SB_REVISION_0:
12876                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12877                                 break;
12878                         case TG3_EEPROM_SB_REVISION_2:
12879                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12880                                 break;
12881                         case TG3_EEPROM_SB_REVISION_3:
12882                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12883                                 break;
12884                         case TG3_EEPROM_SB_REVISION_4:
12885                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12886                                 break;
12887                         case TG3_EEPROM_SB_REVISION_5:
12888                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12889                                 break;
12890                         case TG3_EEPROM_SB_REVISION_6:
12891                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12892                                 break;
12893                         default:
12894                                 return -EIO;
12895                         }
12896                 } else
12897                         return 0;
12898         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12899                 size = NVRAM_SELFBOOT_HW_SIZE;
12900         else
12901                 return -EIO;
12902
12903         buf = kmalloc(size, GFP_KERNEL);
12904         if (buf == NULL)
12905                 return -ENOMEM;
12906
12907         err = -EIO;
12908         for (i = 0, j = 0; i < size; i += 4, j++) {
12909                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12910                 if (err)
12911                         break;
12912         }
12913         if (i < size)
12914                 goto out;
12915
12916         /* Selfboot format */
12917         magic = be32_to_cpu(buf[0]);
12918         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12919             TG3_EEPROM_MAGIC_FW) {
12920                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12921
12922                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12923                     TG3_EEPROM_SB_REVISION_2) {
12924                         /* For rev 2, the csum doesn't include the MBA. */
12925                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12926                                 csum8 += buf8[i];
12927                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12928                                 csum8 += buf8[i];
12929                 } else {
12930                         for (i = 0; i < size; i++)
12931                                 csum8 += buf8[i];
12932                 }
12933
12934                 if (csum8 == 0) {
12935                         err = 0;
12936                         goto out;
12937                 }
12938
12939                 err = -EIO;
12940                 goto out;
12941         }
12942
12943         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12944             TG3_EEPROM_MAGIC_HW) {
12945                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12946                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12947                 u8 *buf8 = (u8 *) buf;
12948
12949                 /* Separate the parity bits and the data bytes.  */
12950                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12951                         if ((i == 0) || (i == 8)) {
12952                                 int l;
12953                                 u8 msk;
12954
12955                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12956                                         parity[k++] = buf8[i] & msk;
12957                                 i++;
12958                         } else if (i == 16) {
12959                                 int l;
12960                                 u8 msk;
12961
12962                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12963                                         parity[k++] = buf8[i] & msk;
12964                                 i++;
12965
12966                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12967                                         parity[k++] = buf8[i] & msk;
12968                                 i++;
12969                         }
12970                         data[j++] = buf8[i];
12971                 }
12972
12973                 err = -EIO;
12974                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12975                         u8 hw8 = hweight8(data[i]);
12976
12977                         if ((hw8 & 0x1) && parity[i])
12978                                 goto out;
12979                         else if (!(hw8 & 0x1) && !parity[i])
12980                                 goto out;
12981                 }
12982                 err = 0;
12983                 goto out;
12984         }
12985
12986         err = -EIO;
12987
12988         /* Bootstrap checksum at offset 0x10 */
12989         csum = calc_crc((unsigned char *) buf, 0x10);
12990         if (csum != le32_to_cpu(buf[0x10/4]))
12991                 goto out;
12992
12993         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12994         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12995         if (csum != le32_to_cpu(buf[0xfc/4]))
12996                 goto out;
12997
12998         kfree(buf);
12999
13000         buf = tg3_vpd_readblock(tp, &len);
13001         if (!buf)
13002                 return -ENOMEM;
13003
13004         err = pci_vpd_check_csum(buf, len);
13005         /* a missing VPD checksum is not treated as an error */
13006         if (err == 1)
13007                 err = 0;
13008 out:
13009         kfree(buf);
13010         return err;
13011 }
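/* The HW-format branch of tg3_test_nvram() enforces odd parity: for each
 * data byte the stored parity bit must complement the byte's bit count.
 * A minimal standalone sketch of that invariant (illustrative only):
 *
 *	static bool selfboot_parity_ok(u8 data, u8 parity_bit)
 *	{
 *		return (hweight8(data) & 1) != !!parity_bit;
 *	}
 *
 * A byte with an odd number of set bits must have its parity bit clear,
 * and vice versa; the two "goto out" cases above fail exactly when the
 * combined parity comes out even.
 */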
13012
13013 #define TG3_SERDES_TIMEOUT_SEC  2
13014 #define TG3_COPPER_TIMEOUT_SEC  6
13015
13016 static int tg3_test_link(struct tg3 *tp)
13017 {
13018         int i, max;
13019
13020         if (!netif_running(tp->dev))
13021                 return -ENODEV;
13022
13023         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13024                 max = TG3_SERDES_TIMEOUT_SEC;
13025         else
13026                 max = TG3_COPPER_TIMEOUT_SEC;
13027
13028         for (i = 0; i < max; i++) {
13029                 if (tp->link_up)
13030                         return 0;
13031
13032                 if (msleep_interruptible(1000))
13033                         break;
13034         }
13035
13036         return -EIO;
13037 }
13038
13039 /* Only test the commonly used registers */
13040 static int tg3_test_registers(struct tg3 *tp)
13041 {
13042         int i, is_5705, is_5750;
13043         u32 offset, read_mask, write_mask, val, save_val, read_val;
13044         static struct {
13045                 u16 offset;
13046                 u16 flags;
13047 #define TG3_FL_5705     0x1
13048 #define TG3_FL_NOT_5705 0x2
13049 #define TG3_FL_NOT_5788 0x4
13050 #define TG3_FL_NOT_5750 0x8
13051                 u32 read_mask;
13052                 u32 write_mask;
13053         } reg_tbl[] = {
13054                 /* MAC Control Registers */
13055                 { MAC_MODE, TG3_FL_NOT_5705,
13056                         0x00000000, 0x00ef6f8c },
13057                 { MAC_MODE, TG3_FL_5705,
13058                         0x00000000, 0x01ef6b8c },
13059                 { MAC_STATUS, TG3_FL_NOT_5705,
13060                         0x03800107, 0x00000000 },
13061                 { MAC_STATUS, TG3_FL_5705,
13062                         0x03800100, 0x00000000 },
13063                 { MAC_ADDR_0_HIGH, 0x0000,
13064                         0x00000000, 0x0000ffff },
13065                 { MAC_ADDR_0_LOW, 0x0000,
13066                         0x00000000, 0xffffffff },
13067                 { MAC_RX_MTU_SIZE, 0x0000,
13068                         0x00000000, 0x0000ffff },
13069                 { MAC_TX_MODE, 0x0000,
13070                         0x00000000, 0x00000070 },
13071                 { MAC_TX_LENGTHS, 0x0000,
13072                         0x00000000, 0x00003fff },
13073                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13074                         0x00000000, 0x000007fc },
13075                 { MAC_RX_MODE, TG3_FL_5705,
13076                         0x00000000, 0x000007dc },
13077                 { MAC_HASH_REG_0, 0x0000,
13078                         0x00000000, 0xffffffff },
13079                 { MAC_HASH_REG_1, 0x0000,
13080                         0x00000000, 0xffffffff },
13081                 { MAC_HASH_REG_2, 0x0000,
13082                         0x00000000, 0xffffffff },
13083                 { MAC_HASH_REG_3, 0x0000,
13084                         0x00000000, 0xffffffff },
13085
13086                 /* Receive Data and Receive BD Initiator Control Registers. */
13087                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13088                         0x00000000, 0xffffffff },
13089                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13090                         0x00000000, 0xffffffff },
13091                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13092                         0x00000000, 0x00000003 },
13093                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13094                         0x00000000, 0xffffffff },
13095                 { RCVDBDI_STD_BD+0, 0x0000,
13096                         0x00000000, 0xffffffff },
13097                 { RCVDBDI_STD_BD+4, 0x0000,
13098                         0x00000000, 0xffffffff },
13099                 { RCVDBDI_STD_BD+8, 0x0000,
13100                         0x00000000, 0xffff0002 },
13101                 { RCVDBDI_STD_BD+0xc, 0x0000,
13102                         0x00000000, 0xffffffff },
13103
13104                 /* Receive BD Initiator Control Registers. */
13105                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13106                         0x00000000, 0xffffffff },
13107                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13108                         0x00000000, 0x000003ff },
13109                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13110                         0x00000000, 0xffffffff },
13111
13112                 /* Host Coalescing Control Registers. */
13113                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13114                         0x00000000, 0x00000004 },
13115                 { HOSTCC_MODE, TG3_FL_5705,
13116                         0x00000000, 0x000000f6 },
13117                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13118                         0x00000000, 0xffffffff },
13119                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13120                         0x00000000, 0x000003ff },
13121                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13122                         0x00000000, 0xffffffff },
13123                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13124                         0x00000000, 0x000003ff },
13125                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13126                         0x00000000, 0xffffffff },
13127                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13128                         0x00000000, 0x000000ff },
13129                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13130                         0x00000000, 0xffffffff },
13131                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13132                         0x00000000, 0x000000ff },
13133                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13134                         0x00000000, 0xffffffff },
13135                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13136                         0x00000000, 0xffffffff },
13137                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13138                         0x00000000, 0xffffffff },
13139                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13140                         0x00000000, 0x000000ff },
13141                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13142                         0x00000000, 0xffffffff },
13143                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13144                         0x00000000, 0x000000ff },
13145                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13146                         0x00000000, 0xffffffff },
13147                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13148                         0x00000000, 0xffffffff },
13149                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13150                         0x00000000, 0xffffffff },
13151                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13152                         0x00000000, 0xffffffff },
13153                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13154                         0x00000000, 0xffffffff },
13155                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13156                         0xffffffff, 0x00000000 },
13157                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13158                         0xffffffff, 0x00000000 },
13159
13160                 /* Buffer Manager Control Registers. */
13161                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13162                         0x00000000, 0x007fff80 },
13163                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13164                         0x00000000, 0x007fffff },
13165                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13166                         0x00000000, 0x0000003f },
13167                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13168                         0x00000000, 0x000001ff },
13169                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13170                         0x00000000, 0x000001ff },
13171                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13172                         0xffffffff, 0x00000000 },
13173                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13174                         0xffffffff, 0x00000000 },
13175
13176                 /* Mailbox Registers */
13177                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13178                         0x00000000, 0x000001ff },
13179                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13180                         0x00000000, 0x000001ff },
13181                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13182                         0x00000000, 0x000007ff },
13183                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13184                         0x00000000, 0x000001ff },
13185
13186                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13187         };
13188
13189         is_5705 = is_5750 = 0;
13190         if (tg3_flag(tp, 5705_PLUS)) {
13191                 is_5705 = 1;
13192                 if (tg3_flag(tp, 5750_PLUS))
13193                         is_5750 = 1;
13194         }
13195
13196         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13197                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13198                         continue;
13199
13200                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13201                         continue;
13202
13203                 if (tg3_flag(tp, IS_5788) &&
13204                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13205                         continue;
13206
13207                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13208                         continue;
13209
13210                 offset = (u32) reg_tbl[i].offset;
13211                 read_mask = reg_tbl[i].read_mask;
13212                 write_mask = reg_tbl[i].write_mask;
13213
13214                 /* Save the original register content */
13215                 save_val = tr32(offset);
13216
13217                 /* Determine the read-only value. */
13218                 read_val = save_val & read_mask;
13219
13220                 /* Write zero to the register, then make sure the read-only bits
13221                  * are not changed and the read/write bits are all zeros.
13222                  */
13223                 tw32(offset, 0);
13224
13225                 val = tr32(offset);
13226
13227                 /* Test the read-only and read/write bits. */
13228                 if (((val & read_mask) != read_val) || (val & write_mask))
13229                         goto out;
13230
13231                 /* Write ones to all the bits defined by read_mask and
13232                  * write_mask, then make sure the read-only bits are not
13233                  * changed and the read/write bits are all ones.
13234                  */
13235                 tw32(offset, read_mask | write_mask);
13236
13237                 val = tr32(offset);
13238
13239                 /* Test the read-only bits. */
13240                 if ((val & read_mask) != read_val)
13241                         goto out;
13242
13243                 /* Test the read/write bits. */
13244                 if ((val & write_mask) != write_mask)
13245                         goto out;
13246
13247                 tw32(offset, save_val);
13248         }
13249
13250         return 0;
13251
13252 out:
13253         if (netif_msg_hw(tp))
13254                 netdev_err(tp->dev,
13255                            "Register test failed at offset %x\n", offset);
13256         tw32(offset, save_val);
13257         return -EIO;
13258 }
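/* The per-register walk in tg3_test_registers() reduces to one predicate:
 * read-only bits (read_mask) must survive any write, and read/write bits
 * (write_mask) must accept both all-zeros and all-ones.  A condensed
 * sketch (illustrative only; reg_rd()/reg_wr() are hypothetical stand-ins
 * for tr32()/tw32()):
 *
 *	static bool reg_masks_ok(u32 off, u32 ro_mask, u32 rw_mask)
 *	{
 *		u32 save = reg_rd(off), ro = save & ro_mask;
 *		bool ok;
 *
 *		reg_wr(off, 0);
 *		ok = (reg_rd(off) & (ro_mask | rw_mask)) == ro;
 *		reg_wr(off, ro_mask | rw_mask);
 *		ok = ok && (reg_rd(off) & ro_mask) == ro &&
 *		     (reg_rd(off) & rw_mask) == rw_mask;
 *		reg_wr(off, save);
 *		return ok;
 *	}
 */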
13259
13260 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13261 {
13262         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13263         int i;
13264         u32 j;
13265
13266         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13267                 for (j = 0; j < len; j += 4) {
13268                         u32 val;
13269
13270                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13271                         tg3_read_mem(tp, offset + j, &val);
13272                         if (val != test_pattern[i])
13273                                 return -EIO;
13274                 }
13275         }
13276         return 0;
13277 }
13278
13279 static int tg3_test_memory(struct tg3 *tp)
13280 {
13281         static struct mem_entry {
13282                 u32 offset;
13283                 u32 len;
13284         } mem_tbl_570x[] = {
13285                 { 0x00000000, 0x00b50},
13286                 { 0x00002000, 0x1c000},
13287                 { 0xffffffff, 0x00000}
13288         }, mem_tbl_5705[] = {
13289                 { 0x00000100, 0x0000c},
13290                 { 0x00000200, 0x00008},
13291                 { 0x00004000, 0x00800},
13292                 { 0x00006000, 0x01000},
13293                 { 0x00008000, 0x02000},
13294                 { 0x00010000, 0x0e000},
13295                 { 0xffffffff, 0x00000}
13296         }, mem_tbl_5755[] = {
13297                 { 0x00000200, 0x00008},
13298                 { 0x00004000, 0x00800},
13299                 { 0x00006000, 0x00800},
13300                 { 0x00008000, 0x02000},
13301                 { 0x00010000, 0x0c000},
13302                 { 0xffffffff, 0x00000}
13303         }, mem_tbl_5906[] = {
13304                 { 0x00000200, 0x00008},
13305                 { 0x00004000, 0x00400},
13306                 { 0x00006000, 0x00400},
13307                 { 0x00008000, 0x01000},
13308                 { 0x00010000, 0x01000},
13309                 { 0xffffffff, 0x00000}
13310         }, mem_tbl_5717[] = {
13311                 { 0x00000200, 0x00008},
13312                 { 0x00010000, 0x0a000},
13313                 { 0x00020000, 0x13c00},
13314                 { 0xffffffff, 0x00000}
13315         }, mem_tbl_57765[] = {
13316                 { 0x00000200, 0x00008},
13317                 { 0x00004000, 0x00800},
13318                 { 0x00006000, 0x09800},
13319                 { 0x00010000, 0x0a000},
13320                 { 0xffffffff, 0x00000}
13321         };
13322         struct mem_entry *mem_tbl;
13323         int err = 0;
13324         int i;
13325
13326         if (tg3_flag(tp, 5717_PLUS))
13327                 mem_tbl = mem_tbl_5717;
13328         else if (tg3_flag(tp, 57765_CLASS) ||
13329                  tg3_asic_rev(tp) == ASIC_REV_5762)
13330                 mem_tbl = mem_tbl_57765;
13331         else if (tg3_flag(tp, 5755_PLUS))
13332                 mem_tbl = mem_tbl_5755;
13333         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13334                 mem_tbl = mem_tbl_5906;
13335         else if (tg3_flag(tp, 5705_PLUS))
13336                 mem_tbl = mem_tbl_5705;
13337         else
13338                 mem_tbl = mem_tbl_570x;
13339
13340         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13341                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13342                 if (err)
13343                         break;
13344         }
13345
13346         return err;
13347 }
13348
13349 #define TG3_TSO_MSS             500
13350
13351 #define TG3_TSO_IP_HDR_LEN      20
13352 #define TG3_TSO_TCP_HDR_LEN     20
13353 #define TG3_TSO_TCP_OPT_LEN     12
13354
13355 static const u8 tg3_tso_header[] = {
13356 0x08, 0x00,
13357 0x45, 0x00, 0x00, 0x00,
13358 0x00, 0x00, 0x40, 0x00,
13359 0x40, 0x06, 0x00, 0x00,
13360 0x0a, 0x00, 0x00, 0x01,
13361 0x0a, 0x00, 0x00, 0x02,
13362 0x0d, 0x00, 0xe0, 0x00,
13363 0x00, 0x00, 0x01, 0x00,
13364 0x00, 0x00, 0x02, 0x00,
13365 0x80, 0x10, 0x10, 0x00,
13366 0x14, 0x09, 0x00, 0x00,
13367 0x01, 0x01, 0x08, 0x0a,
13368 0x11, 0x11, 0x11, 0x11,
13369 0x11, 0x11, 0x11, 0x11,
13370 };
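/* Decode of tg3_tso_header above (for reference):
 *	bytes  0- 1  Ethernet type 0x0800 (IPv4); the MAC addresses that
 *		     precede it in the frame are filled in at runtime
 *	bytes  2-21  IPv4 header: IHL 5, DF set, TTL 64, protocol TCP,
 *		     10.0.0.1 -> 10.0.0.2; tot_len is patched in later
 *	bytes 22-53  TCP header: data offset 8 (20 bytes plus 12 bytes of
 *		     options - two NOPs and a timestamp option), ACK set
 */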
13371
13372 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13373 {
13374         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13375         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13376         u32 budget;
13377         struct sk_buff *skb;
13378         u8 *tx_data, *rx_data;
13379         dma_addr_t map;
13380         int num_pkts, tx_len, rx_len, i, err;
13381         struct tg3_rx_buffer_desc *desc;
13382         struct tg3_napi *tnapi, *rnapi;
13383         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13384
13385         tnapi = &tp->napi[0];
13386         rnapi = &tp->napi[0];
13387         if (tp->irq_cnt > 1) {
13388                 if (tg3_flag(tp, ENABLE_RSS))
13389                         rnapi = &tp->napi[1];
13390                 if (tg3_flag(tp, ENABLE_TSS))
13391                         tnapi = &tp->napi[1];
13392         }
13393         coal_now = tnapi->coal_now | rnapi->coal_now;
13394
13395         err = -EIO;
13396
13397         tx_len = pktsz;
13398         skb = netdev_alloc_skb(tp->dev, tx_len);
13399         if (!skb)
13400                 return -ENOMEM;
13401
13402         tx_data = skb_put(skb, tx_len);
13403         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13404         memset(tx_data + ETH_ALEN, 0x0, 8);
13405
13406         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13407
13408         if (tso_loopback) {
13409                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13410
13411                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13412                               TG3_TSO_TCP_OPT_LEN;
13413
13414                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13415                        sizeof(tg3_tso_header));
13416                 mss = TG3_TSO_MSS;
13417
13418                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13419                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13420
13421                 /* Set the total length field in the IP header */
13422                 iph->tot_len = htons((u16)(mss + hdr_len));
13423
13424                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13425                               TXD_FLAG_CPU_POST_DMA);
13426
13427                 if (tg3_flag(tp, HW_TSO_1) ||
13428                     tg3_flag(tp, HW_TSO_2) ||
13429                     tg3_flag(tp, HW_TSO_3)) {
13430                         struct tcphdr *th;
13431                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13432                         th = (struct tcphdr *)&tx_data[val];
13433                         th->check = 0;
13434                 } else
13435                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13436
13437                 if (tg3_flag(tp, HW_TSO_3)) {
13438                         mss |= (hdr_len & 0xc) << 12;
13439                         if (hdr_len & 0x10)
13440                                 base_flags |= 0x00000010;
13441                         base_flags |= (hdr_len & 0x3e0) << 5;
13442                 } else if (tg3_flag(tp, HW_TSO_2))
13443                         mss |= hdr_len << 9;
13444                 else if (tg3_flag(tp, HW_TSO_1) ||
13445                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13446                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13447                 } else {
13448                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13449                 }
13450
13451                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13452         } else {
13453                 num_pkts = 1;
13454                 data_off = ETH_HLEN;
13455
13456                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13457                     tx_len > VLAN_ETH_FRAME_LEN)
13458                         base_flags |= TXD_FLAG_JMB_PKT;
13459         }
13460
13461         for (i = data_off; i < tx_len; i++)
13462                 tx_data[i] = (u8) (i & 0xff);
13463
13464         map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13465         if (dma_mapping_error(&tp->pdev->dev, map)) {
13466                 dev_kfree_skb(skb);
13467                 return -EIO;
13468         }
13469
13470         val = tnapi->tx_prod;
13471         tnapi->tx_buffers[val].skb = skb;
13472         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13473
13474         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13475                rnapi->coal_now);
13476
13477         udelay(10);
13478
13479         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13480
13481         budget = tg3_tx_avail(tnapi);
13482         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13483                             base_flags | TXD_FLAG_END, mss, 0)) {
13484                 tnapi->tx_buffers[val].skb = NULL;
13485                 dev_kfree_skb(skb);
13486                 return -EIO;
13487         }
13488
13489         tnapi->tx_prod++;
13490
13491         /* Sync BD data before updating mailbox */
13492         wmb();
13493
13494         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13495         tr32_mailbox(tnapi->prodmbox);
13496
13497         udelay(10);
13498
13499         /* Poll for up to 350 usec to give slower 10/100 Mbps devices enough time. */
13500         for (i = 0; i < 35; i++) {
13501                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13502                        coal_now);
13503
13504                 udelay(10);
13505
13506                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13507                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13508                 if ((tx_idx == tnapi->tx_prod) &&
13509                     (rx_idx == (rx_start_idx + num_pkts)))
13510                         break;
13511         }
13512
13513         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13514         dev_kfree_skb(skb);
13515
13516         if (tx_idx != tnapi->tx_prod)
13517                 goto out;
13518
13519         if (rx_idx != rx_start_idx + num_pkts)
13520                 goto out;
13521
13522         val = data_off;
13523         while (rx_idx != rx_start_idx) {
13524                 desc = &rnapi->rx_rcb[rx_start_idx++];
13525                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13526                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13527
13528                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13529                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13530                         goto out;
13531
13532                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13533                          - ETH_FCS_LEN;
13534
13535                 if (!tso_loopback) {
13536                         if (rx_len != tx_len)
13537                                 goto out;
13538
13539                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13540                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13541                                         goto out;
13542                         } else {
13543                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13544                                         goto out;
13545                         }
13546                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13547                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13548                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13549                         goto out;
13550                 }
13551
13552                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13553                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13554                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13555                                              mapping);
13556                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13557                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13558                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13559                                              mapping);
13560                 } else
13561                         goto out;
13562
13563                 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13564                                         DMA_FROM_DEVICE);
13565
13566                 rx_data += TG3_RX_OFFSET(tp);
13567                 for (i = data_off; i < rx_len; i++, val++) {
13568                         if (*(rx_data + i) != (u8) (val & 0xff))
13569                                 goto out;
13570                 }
13571         }
13572
13573         err = 0;
13574
13575         /* tg3_free_rings will unmap and free the rx_data */
13576 out:
13577         return err;
13578 }
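/* tg3_run_loopback() in brief: the payload is a counting pattern
 * (tx_data[i] = i & 0xff), so corruption anywhere along the DMA path
 * shows up in the final byte-for-byte compare.  In the TSO case the one
 * oversized frame must come back as num_pkts = DIV_ROUND_UP(payload, MSS)
 * segments, which the rx_idx == rx_start_idx + num_pkts check enforces.
 */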
13579
13580 #define TG3_STD_LOOPBACK_FAILED         1
13581 #define TG3_JMB_LOOPBACK_FAILED         2
13582 #define TG3_TSO_LOOPBACK_FAILED         4
13583 #define TG3_LOOPBACK_FAILED \
13584         (TG3_STD_LOOPBACK_FAILED | \
13585          TG3_JMB_LOOPBACK_FAILED | \
13586          TG3_TSO_LOOPBACK_FAILED)
13587
13588 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13589 {
13590         int err = -EIO;
13591         u32 eee_cap;
13592         u32 jmb_pkt_sz = 9000;
13593
13594         if (tp->dma_limit)
13595                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13596
13597         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13598         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13599
13600         if (!netif_running(tp->dev)) {
13601                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13602                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13603                 if (do_extlpbk)
13604                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13605                 goto done;
13606         }
13607
13608         err = tg3_reset_hw(tp, true);
13609         if (err) {
13610                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13611                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13612                 if (do_extlpbk)
13613                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13614                 goto done;
13615         }
13616
13617         if (tg3_flag(tp, ENABLE_RSS)) {
13618                 int i;
13619
13620                 /* Reroute all rx packets to the 1st queue */
13621                 for (i = MAC_RSS_INDIR_TBL_0;
13622                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13623                         tw32(i, 0x0);
13624         }
13625
13626         /* HW erratum - MAC loopback fails in some cases on 5780.
13627          * Normal traffic and PHY loopback are not affected by
13628          * this erratum.  Also, the MAC loopback test is deprecated
13629          * for all newer ASIC revisions.
13630          */
13631         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13632             !tg3_flag(tp, CPMU_PRESENT)) {
13633                 tg3_mac_loopback(tp, true);
13634
13635                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13636                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13637
13638                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13639                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13640                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13641
13642                 tg3_mac_loopback(tp, false);
13643         }
13644
13645         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13646             !tg3_flag(tp, USE_PHYLIB)) {
13647                 int i;
13648
13649                 tg3_phy_lpbk_set(tp, 0, false);
13650
13651                 /* Wait for link */
13652                 for (i = 0; i < 100; i++) {
13653                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13654                                 break;
13655                         mdelay(1);
13656                 }
13657
13658                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13659                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13660                 if (tg3_flag(tp, TSO_CAPABLE) &&
13661                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13662                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13663                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13664                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13665                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13666
13667                 if (do_extlpbk) {
13668                         tg3_phy_lpbk_set(tp, 0, true);
13669
13670                         /* All link indications report up, but the hardware
13671                          * isn't really ready for about 20 msec.  Double it
13672                          * to be sure.
13673                          */
13674                         mdelay(40);
13675
13676                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13677                                 data[TG3_EXT_LOOPB_TEST] |=
13678                                                         TG3_STD_LOOPBACK_FAILED;
13679                         if (tg3_flag(tp, TSO_CAPABLE) &&
13680                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13681                                 data[TG3_EXT_LOOPB_TEST] |=
13682                                                         TG3_TSO_LOOPBACK_FAILED;
13683                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13684                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13685                                 data[TG3_EXT_LOOPB_TEST] |=
13686                                                         TG3_JMB_LOOPBACK_FAILED;
13687                 }
13688
13689                 /* Re-enable gphy autopowerdown. */
13690                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13691                         tg3_phy_toggle_apd(tp, true);
13692         }
13693
13694         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13695                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13696
13697 done:
13698         tp->phy_flags |= eee_cap;
13699
13700         return err;
13701 }
13702
13703 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13704                           u64 *data)
13705 {
13706         struct tg3 *tp = netdev_priv(dev);
13707         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13708
13709         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13710                 if (tg3_power_up(tp)) {
13711                         etest->flags |= ETH_TEST_FL_FAILED;
13712                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13713                         return;
13714                 }
13715                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13716         }
13717
13718         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13719
13720         if (tg3_test_nvram(tp) != 0) {
13721                 etest->flags |= ETH_TEST_FL_FAILED;
13722                 data[TG3_NVRAM_TEST] = 1;
13723         }
13724         if (!doextlpbk && tg3_test_link(tp)) {
13725                 etest->flags |= ETH_TEST_FL_FAILED;
13726                 data[TG3_LINK_TEST] = 1;
13727         }
13728         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13729                 int err, err2 = 0, irq_sync = 0;
13730
13731                 if (netif_running(dev)) {
13732                         tg3_phy_stop(tp);
13733                         tg3_netif_stop(tp);
13734                         irq_sync = 1;
13735                 }
13736
13737                 tg3_full_lock(tp, irq_sync);
13738                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13739                 err = tg3_nvram_lock(tp);
13740                 tg3_halt_cpu(tp, RX_CPU_BASE);
13741                 if (!tg3_flag(tp, 5705_PLUS))
13742                         tg3_halt_cpu(tp, TX_CPU_BASE);
13743                 if (!err)
13744                         tg3_nvram_unlock(tp);
13745
13746                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13747                         tg3_phy_reset(tp);
13748
13749                 if (tg3_test_registers(tp) != 0) {
13750                         etest->flags |= ETH_TEST_FL_FAILED;
13751                         data[TG3_REGISTER_TEST] = 1;
13752                 }
13753
13754                 if (tg3_test_memory(tp) != 0) {
13755                         etest->flags |= ETH_TEST_FL_FAILED;
13756                         data[TG3_MEMORY_TEST] = 1;
13757                 }
13758
13759                 if (doextlpbk)
13760                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13761
13762                 if (tg3_test_loopback(tp, data, doextlpbk))
13763                         etest->flags |= ETH_TEST_FL_FAILED;
13764
13765                 tg3_full_unlock(tp);
13766
13767                 if (tg3_test_interrupt(tp) != 0) {
13768                         etest->flags |= ETH_TEST_FL_FAILED;
13769                         data[TG3_INTERRUPT_TEST] = 1;
13770                 }
13771
13772                 tg3_full_lock(tp, 0);
13773
13774                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13775                 if (netif_running(dev)) {
13776                         tg3_flag_set(tp, INIT_COMPLETE);
13777                         err2 = tg3_restart_hw(tp, true);
13778                         if (!err2)
13779                                 tg3_netif_start(tp);
13780                 }
13781
13782                 tg3_full_unlock(tp);
13783
13784                 if (irq_sync && !err2)
13785                         tg3_phy_start(tp);
13786         }
13787         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13788                 tg3_power_down_prepare(tp);
13789
13790 }
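/* Illustrative usage (not driver code): tg3_self_test() backs
 * "ethtool -t".  The offline variant takes the device out of service
 * while the register, memory, loopback and interrupt tests run:
 *
 *	# ethtool -t eth0 online	NVRAM and link tests only
 *	# ethtool -t eth0 offline	full suite, disrupts traffic
 *	# ethtool -t eth0 external_lb	offline plus external loopback
 */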
13791
13792 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13793 {
13794         struct tg3 *tp = netdev_priv(dev);
13795         struct hwtstamp_config stmpconf;
13796
13797         if (!tg3_flag(tp, PTP_CAPABLE))
13798                 return -EOPNOTSUPP;
13799
13800         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13801                 return -EFAULT;
13802
13803         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13804             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13805                 return -ERANGE;
13806
13807         switch (stmpconf.rx_filter) {
13808         case HWTSTAMP_FILTER_NONE:
13809                 tp->rxptpctl = 0;
13810                 break;
13811         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13812                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13813                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13814                 break;
13815         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13816                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13817                                TG3_RX_PTP_CTL_SYNC_EVNT;
13818                 break;
13819         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13820                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13821                                TG3_RX_PTP_CTL_DELAY_REQ;
13822                 break;
13823         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13824                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13825                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13826                 break;
13827         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13828                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13829                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13830                 break;
13831         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13832                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13833                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13834                 break;
13835         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13836                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13837                                TG3_RX_PTP_CTL_SYNC_EVNT;
13838                 break;
13839         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13840                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13841                                TG3_RX_PTP_CTL_SYNC_EVNT;
13842                 break;
13843         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13844                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13845                                TG3_RX_PTP_CTL_SYNC_EVNT;
13846                 break;
13847         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13848                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13849                                TG3_RX_PTP_CTL_DELAY_REQ;
13850                 break;
13851         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13852                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13853                                TG3_RX_PTP_CTL_DELAY_REQ;
13854                 break;
13855         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13856                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13857                                TG3_RX_PTP_CTL_DELAY_REQ;
13858                 break;
13859         default:
13860                 return -ERANGE;
13861         }
13862
13863         if (netif_running(dev) && tp->rxptpctl)
13864                 tw32(TG3_RX_PTP_CTL,
13865                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13866
13867         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13868                 tg3_flag_set(tp, TX_TSTAMP_EN);
13869         else
13870                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13871
13872         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13873                 -EFAULT : 0;
13874 }
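/* Illustrative userspace sketch of driving the SIOCSHWTSTAMP path above
 * (assumptions: interface "eth0", fd is any open AF_INET datagram socket,
 * error handling elided):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On success cfg is written back with what the driver actually applied,
 * which is why the handler ends with copy_to_user().
 */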
13875
13876 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13877 {
13878         struct tg3 *tp = netdev_priv(dev);
13879         struct hwtstamp_config stmpconf;
13880
13881         if (!tg3_flag(tp, PTP_CAPABLE))
13882                 return -EOPNOTSUPP;
13883
13884         stmpconf.flags = 0;
13885         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13886                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13887
13888         switch (tp->rxptpctl) {
13889         case 0:
13890                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13891                 break;
13892         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13893                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13894                 break;
13895         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13896                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13897                 break;
13898         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13899                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13900                 break;
13901         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13902                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13903                 break;
13904         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13905                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13906                 break;
13907         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13908                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13909                 break;
13910         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13911                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13912                 break;
13913         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13914                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13915                 break;
13916         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13917                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13918                 break;
13919         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13920                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13921                 break;
13922         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13923                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13924                 break;
13925         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13926                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13927                 break;
13928         default:
13929                 WARN_ON_ONCE(1);
13930                 return -ERANGE;
13931         }
13932
13933         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13934                 -EFAULT : 0;
13935 }
13936
13937 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13938 {
13939         struct mii_ioctl_data *data = if_mii(ifr);
13940         struct tg3 *tp = netdev_priv(dev);
13941         int err;
13942
13943         if (tg3_flag(tp, USE_PHYLIB)) {
13944                 struct phy_device *phydev;
13945                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13946                         return -EAGAIN;
13947                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13948                 return phy_mii_ioctl(phydev, ifr, cmd);
13949         }
13950
13951         switch (cmd) {
13952         case SIOCGMIIPHY:
13953                 data->phy_id = tp->phy_addr;
13954
13955                 fallthrough;
13956         case SIOCGMIIREG: {
13957                 u32 mii_regval;
13958
13959                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13960                         break;                  /* We have no PHY */
13961
13962                 if (!netif_running(dev))
13963                         return -EAGAIN;
13964
13965                 spin_lock_bh(&tp->lock);
13966                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13967                                     data->reg_num & 0x1f, &mii_regval);
13968                 spin_unlock_bh(&tp->lock);
13969
13970                 data->val_out = mii_regval;
13971
13972                 return err;
13973         }
13974
13975         case SIOCSMIIREG:
13976                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13977                         break;                  /* We have no PHY */
13978
13979                 if (!netif_running(dev))
13980                         return -EAGAIN;
13981
13982                 spin_lock_bh(&tp->lock);
13983                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13984                                      data->reg_num & 0x1f, data->val_in);
13985                 spin_unlock_bh(&tp->lock);
13986
13987                 return err;
13988
13989         case SIOCSHWTSTAMP:
13990                 return tg3_hwtstamp_set(dev, ifr);
13991
13992         case SIOCGHWTSTAMP:
13993                 return tg3_hwtstamp_get(dev, ifr);
13994
13995         default:
13996                 /* do nothing */
13997                 break;
13998         }
13999         return -EOPNOTSUPP;
14000 }
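/* Illustrative usage (not driver code): the SIOCGMIIPHY/SIOCGMIIREG
 * branches above are the interface used by tools such as mii-tool.
 * Reading the BMSR (MII register 1) from userspace looks roughly like:
 *
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	link status lands in mii->val_out
 */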
14001
14002 static int tg3_get_coalesce(struct net_device *dev,
14003                             struct ethtool_coalesce *ec,
14004                             struct kernel_ethtool_coalesce *kernel_coal,
14005                             struct netlink_ext_ack *extack)
14006 {
14007         struct tg3 *tp = netdev_priv(dev);
14008
14009         memcpy(ec, &tp->coal, sizeof(*ec));
14010         return 0;
14011 }
14012
14013 static int tg3_set_coalesce(struct net_device *dev,
14014                             struct ethtool_coalesce *ec,
14015                             struct kernel_ethtool_coalesce *kernel_coal,
14016                             struct netlink_ext_ack *extack)
14017 {
14018         struct tg3 *tp = netdev_priv(dev);
14019         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14020         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14021
14022         if (!tg3_flag(tp, 5705_PLUS)) {
14023                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14024                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14025                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14026                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14027         }
14028
14029         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14030             (!ec->rx_coalesce_usecs) ||
14031             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14032             (!ec->tx_coalesce_usecs) ||
14033             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14034             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14035             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14036             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14037             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14038             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14039             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14040             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14041                 return -EINVAL;
14042
14043         /* Only copy relevant parameters, ignore all others. */
14044         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14045         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14046         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14047         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14048         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14049         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14050         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14051         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14052         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14053
14054         if (netif_running(dev)) {
14055                 tg3_full_lock(tp, 0);
14056                 __tg3_set_coalesce(tp, &tp->coal);
14057                 tg3_full_unlock(tp);
14058         }
14059         return 0;
14060 }
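
/* Illustrative usage, with a hypothetical interface name and values: a
 * userspace request such as
 *
 *   ethtool -C eth0 rx-usecs 20 tx-usecs 72 rx-frames 5 tx-frames 53
 *
 * ends up in tg3_set_coalesce() above; values outside the bounds checked
 * there are rejected with -EINVAL before any hardware state is touched.
 */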
14061
14062 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14063 {
14064         struct tg3 *tp = netdev_priv(dev);
14065
14066         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14067                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14068                 return -EOPNOTSUPP;
14069         }
14070
14071         if (edata->advertised != tp->eee.advertised) {
14072                 netdev_warn(tp->dev,
14073                             "Direct manipulation of EEE advertisement is not supported\n");
14074                 return -EINVAL;
14075         }
14076
14077         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14078                 netdev_warn(tp->dev,
14079                             "Maximum supported Tx LPI timer is %#x\n",
14080                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14081                 return -EINVAL;
14082         }
14083
14084         tp->eee = *edata;
14085
14086         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14087         tg3_warn_mgmt_link_flap(tp);
14088
14089         if (netif_running(tp->dev)) {
14090                 tg3_full_lock(tp, 0);
14091                 tg3_setup_eee(tp);
14092                 tg3_phy_reset(tp);
14093                 tg3_full_unlock(tp);
14094         }
14095
14096         return 0;
14097 }
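
/* Illustrative usage, with a hypothetical interface name and timer value:
 * a userspace request such as
 *
 *   ethtool --set-eee eth0 tx-timer 1000
 *
 * reaches tg3_set_eee() above; requests that try to change the EEE
 * advertisement mask directly are rejected there with -EINVAL.
 */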
14098
14099 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14100 {
14101         struct tg3 *tp = netdev_priv(dev);
14102
14103         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14104                 netdev_warn(tp->dev,
14105                             "Board does not support EEE!\n");
14106                 return -EOPNOTSUPP;
14107         }
14108
14109         *edata = tp->eee;
14110         return 0;
14111 }
14112
14113 static const struct ethtool_ops tg3_ethtool_ops = {
14114         .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14115                                      ETHTOOL_COALESCE_MAX_FRAMES |
14116                                      ETHTOOL_COALESCE_USECS_IRQ |
14117                                      ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14118                                      ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14119         .get_drvinfo            = tg3_get_drvinfo,
14120         .get_regs_len           = tg3_get_regs_len,
14121         .get_regs               = tg3_get_regs,
14122         .get_wol                = tg3_get_wol,
14123         .set_wol                = tg3_set_wol,
14124         .get_msglevel           = tg3_get_msglevel,
14125         .set_msglevel           = tg3_set_msglevel,
14126         .nway_reset             = tg3_nway_reset,
14127         .get_link               = ethtool_op_get_link,
14128         .get_eeprom_len         = tg3_get_eeprom_len,
14129         .get_eeprom             = tg3_get_eeprom,
14130         .set_eeprom             = tg3_set_eeprom,
14131         .get_ringparam          = tg3_get_ringparam,
14132         .set_ringparam          = tg3_set_ringparam,
14133         .get_pauseparam         = tg3_get_pauseparam,
14134         .set_pauseparam         = tg3_set_pauseparam,
14135         .self_test              = tg3_self_test,
14136         .get_strings            = tg3_get_strings,
14137         .set_phys_id            = tg3_set_phys_id,
14138         .get_ethtool_stats      = tg3_get_ethtool_stats,
14139         .get_coalesce           = tg3_get_coalesce,
14140         .set_coalesce           = tg3_set_coalesce,
14141         .get_sset_count         = tg3_get_sset_count,
14142         .get_rxnfc              = tg3_get_rxnfc,
14143         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14144         .get_rxfh               = tg3_get_rxfh,
14145         .set_rxfh               = tg3_set_rxfh,
14146         .get_channels           = tg3_get_channels,
14147         .set_channels           = tg3_set_channels,
14148         .get_ts_info            = tg3_get_ts_info,
14149         .get_eee                = tg3_get_eee,
14150         .set_eee                = tg3_set_eee,
14151         .get_link_ksettings     = tg3_get_link_ksettings,
14152         .set_link_ksettings     = tg3_set_link_ksettings,
14153 };
14154
14155 static void tg3_get_stats64(struct net_device *dev,
14156                             struct rtnl_link_stats64 *stats)
14157 {
14158         struct tg3 *tp = netdev_priv(dev);
14159
14160         spin_lock_bh(&tp->lock);
14161         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14162                 *stats = tp->net_stats_prev;
14163                 spin_unlock_bh(&tp->lock);
14164                 return;
14165         }
14166
14167         tg3_get_nstats(tp, stats);
14168         spin_unlock_bh(&tp->lock);
14169 }
14170
14171 static void tg3_set_rx_mode(struct net_device *dev)
14172 {
14173         struct tg3 *tp = netdev_priv(dev);
14174
14175         if (!netif_running(dev))
14176                 return;
14177
14178         tg3_full_lock(tp, 0);
14179         __tg3_set_rx_mode(dev);
14180         tg3_full_unlock(tp);
14181 }
14182
14183 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14184                                int new_mtu)
14185 {
14186         dev->mtu = new_mtu;
14187
14188         if (new_mtu > ETH_DATA_LEN) {
14189                 if (tg3_flag(tp, 5780_CLASS)) {
14190                         netdev_update_features(dev);
14191                         tg3_flag_clear(tp, TSO_CAPABLE);
14192                 } else {
14193                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14194                 }
14195         } else {
14196                 if (tg3_flag(tp, 5780_CLASS)) {
14197                         tg3_flag_set(tp, TSO_CAPABLE);
14198                         netdev_update_features(dev);
14199                 }
14200                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14201         }
14202 }
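
/* Illustrative usage, with a hypothetical interface name: a command such
 * as
 *
 *   ip link set dev eth0 mtu 9000
 *
 * lands in tg3_change_mtu() below, and any MTU above ETH_DATA_LEN (1500)
 * takes the jumbo-frame path in tg3_set_mtu() above.
 */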
14203
14204 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14205 {
14206         struct tg3 *tp = netdev_priv(dev);
14207         int err;
14208         bool reset_phy = false;
14209
14210         if (!netif_running(dev)) {
14211                 /* The new MTU will simply be applied later,
14212                  * when the device is brought up.
14213                  */
14214                 tg3_set_mtu(dev, tp, new_mtu);
14215                 return 0;
14216         }
14217
14218         tg3_phy_stop(tp);
14219
14220         tg3_netif_stop(tp);
14221
14222         tg3_set_mtu(dev, tp, new_mtu);
14223
14224         tg3_full_lock(tp, 1);
14225
14226         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14227
14228         /* Reset the PHY, otherwise the read DMA engine will be left in a
14229          * mode that breaks all read requests down into 256-byte chunks.
14230          */
14231         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14232             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14233             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14234             tg3_asic_rev(tp) == ASIC_REV_5720)
14235                 reset_phy = true;
14236
14237         err = tg3_restart_hw(tp, reset_phy);
14238
14239         if (!err)
14240                 tg3_netif_start(tp);
14241
14242         tg3_full_unlock(tp);
14243
14244         if (!err)
14245                 tg3_phy_start(tp);
14246
14247         return err;
14248 }
14249
14250 static const struct net_device_ops tg3_netdev_ops = {
14251         .ndo_open               = tg3_open,
14252         .ndo_stop               = tg3_close,
14253         .ndo_start_xmit         = tg3_start_xmit,
14254         .ndo_get_stats64        = tg3_get_stats64,
14255         .ndo_validate_addr      = eth_validate_addr,
14256         .ndo_set_rx_mode        = tg3_set_rx_mode,
14257         .ndo_set_mac_address    = tg3_set_mac_addr,
14258         .ndo_eth_ioctl          = tg3_ioctl,
14259         .ndo_tx_timeout         = tg3_tx_timeout,
14260         .ndo_change_mtu         = tg3_change_mtu,
14261         .ndo_fix_features       = tg3_fix_features,
14262         .ndo_set_features       = tg3_set_features,
14263 #ifdef CONFIG_NET_POLL_CONTROLLER
14264         .ndo_poll_controller    = tg3_poll_controller,
14265 #endif
14266 };
14267
14268 static void tg3_get_eeprom_size(struct tg3 *tp)
14269 {
14270         u32 cursize, val, magic;
14271
14272         tp->nvram_size = EEPROM_CHIP_SIZE;
14273
14274         if (tg3_nvram_read(tp, 0, &magic) != 0)
14275                 return;
14276
14277         if ((magic != TG3_EEPROM_MAGIC) &&
14278             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14279             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14280                 return;
14281
14282         /*
14283          * Size the chip by reading offsets at increasing powers of two.
14284          * When we encounter our validation signature, we know the addressing
14285          * has wrapped around, and thus have our chip size.
14286          */
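        /* Hypothetical worked example: on a 128 KB (0x20000-byte) part, the
         * reads at 0x10, 0x20, ..., 0x10000 return ordinary data, but the
         * read at offset 0x20000 wraps around to offset 0 and returns the
         * magic signature, so the loop below exits with cursize == 0x20000.
         */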
14287         cursize = 0x10;
14288
14289         while (cursize < tp->nvram_size) {
14290                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14291                         return;
14292
14293                 if (val == magic)
14294                         break;
14295
14296                 cursize <<= 1;
14297         }
14298
14299         tp->nvram_size = cursize;
14300 }
14301
14302 static void tg3_get_nvram_size(struct tg3 *tp)
14303 {
14304         u32 val;
14305
14306         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14307                 return;
14308
14309         /* Selfboot format */
14310         if (val != TG3_EEPROM_MAGIC) {
14311                 tg3_get_eeprom_size(tp);
14312                 return;
14313         }
14314
14315         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14316                 if (val != 0) {
14317                         /* This is confusing.  We want to operate on the
14318                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14319                          * call will read from NVRAM and byteswap the data
14320                          * according to the byteswapping settings for all
14321                          * other register accesses.  This ensures the data we
14322                          * want will always reside in the lower 16-bits.
14323                          * However, the data in NVRAM is in LE format, which
14324                          * means the data from the NVRAM read will always be
14325                          * opposite the endianness of the CPU.  The 16-bit
14326                          * byteswap then brings the data to CPU endianness.
14327                          */
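                        /* Hypothetical worked example: if the post-byteswap
                         * value of the word at 0xf0 is 0x00000100, then
                         * (u16)(val & 0x0000ffff) == 0x0100, swab16(0x0100)
                         * == 0x0001, and nvram_size becomes 1 * 1024 bytes.
                         */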
14328                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14329                         return;
14330                 }
14331         }
14332         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14333 }
14334
14335 static void tg3_get_nvram_info(struct tg3 *tp)
14336 {
14337         u32 nvcfg1;
14338
14339         nvcfg1 = tr32(NVRAM_CFG1);
14340         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14341                 tg3_flag_set(tp, FLASH);
14342         } else {
14343                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14344                 tw32(NVRAM_CFG1, nvcfg1);
14345         }
14346
14347         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14348             tg3_flag(tp, 5780_CLASS)) {
14349                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14350                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14351                         tp->nvram_jedecnum = JEDEC_ATMEL;
14352                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14353                         tg3_flag_set(tp, NVRAM_BUFFERED);
14354                         break;
14355                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14356                         tp->nvram_jedecnum = JEDEC_ATMEL;
14357                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14358                         break;
14359                 case FLASH_VENDOR_ATMEL_EEPROM:
14360                         tp->nvram_jedecnum = JEDEC_ATMEL;
14361                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14362                         tg3_flag_set(tp, NVRAM_BUFFERED);
14363                         break;
14364                 case FLASH_VENDOR_ST:
14365                         tp->nvram_jedecnum = JEDEC_ST;
14366                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14367                         tg3_flag_set(tp, NVRAM_BUFFERED);
14368                         break;
14369                 case FLASH_VENDOR_SAIFUN:
14370                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14371                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14372                         break;
14373                 case FLASH_VENDOR_SST_SMALL:
14374                 case FLASH_VENDOR_SST_LARGE:
14375                         tp->nvram_jedecnum = JEDEC_SST;
14376                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14377                         break;
14378                 }
14379         } else {
14380                 tp->nvram_jedecnum = JEDEC_ATMEL;
14381                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14382                 tg3_flag_set(tp, NVRAM_BUFFERED);
14383         }
14384 }
14385
14386 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14387 {
14388         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14389         case FLASH_5752PAGE_SIZE_256:
14390                 tp->nvram_pagesize = 256;
14391                 break;
14392         case FLASH_5752PAGE_SIZE_512:
14393                 tp->nvram_pagesize = 512;
14394                 break;
14395         case FLASH_5752PAGE_SIZE_1K:
14396                 tp->nvram_pagesize = 1024;
14397                 break;
14398         case FLASH_5752PAGE_SIZE_2K:
14399                 tp->nvram_pagesize = 2048;
14400                 break;
14401         case FLASH_5752PAGE_SIZE_4K:
14402                 tp->nvram_pagesize = 4096;
14403                 break;
14404         case FLASH_5752PAGE_SIZE_264:
14405                 tp->nvram_pagesize = 264;
14406                 break;
14407         case FLASH_5752PAGE_SIZE_528:
14408                 tp->nvram_pagesize = 528;
14409                 break;
14410         }
14411 }
14412
14413 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14414 {
14415         u32 nvcfg1;
14416
14417         nvcfg1 = tr32(NVRAM_CFG1);
14418
14419         /* NVRAM protection for TPM */
14420         if (nvcfg1 & (1 << 27))
14421                 tg3_flag_set(tp, PROTECTED_NVRAM);
14422
14423         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14424         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14425         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14426                 tp->nvram_jedecnum = JEDEC_ATMEL;
14427                 tg3_flag_set(tp, NVRAM_BUFFERED);
14428                 break;
14429         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14430                 tp->nvram_jedecnum = JEDEC_ATMEL;
14431                 tg3_flag_set(tp, NVRAM_BUFFERED);
14432                 tg3_flag_set(tp, FLASH);
14433                 break;
14434         case FLASH_5752VENDOR_ST_M45PE10:
14435         case FLASH_5752VENDOR_ST_M45PE20:
14436         case FLASH_5752VENDOR_ST_M45PE40:
14437                 tp->nvram_jedecnum = JEDEC_ST;
14438                 tg3_flag_set(tp, NVRAM_BUFFERED);
14439                 tg3_flag_set(tp, FLASH);
14440                 break;
14441         }
14442
14443         if (tg3_flag(tp, FLASH)) {
14444                 tg3_nvram_get_pagesize(tp, nvcfg1);
14445         } else {
14446                 /* For EEPROMs, set the page size to the maximum EEPROM size. */
14447                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14448
14449                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14450                 tw32(NVRAM_CFG1, nvcfg1);
14451         }
14452 }
14453
14454 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14455 {
14456         u32 nvcfg1, protect = 0;
14457
14458         nvcfg1 = tr32(NVRAM_CFG1);
14459
14460         /* NVRAM protection for TPM */
14461         if (nvcfg1 & (1 << 27)) {
14462                 tg3_flag_set(tp, PROTECTED_NVRAM);
14463                 protect = 1;
14464         }
14465
14466         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14467         switch (nvcfg1) {
14468         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14469         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14470         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14471         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14472                 tp->nvram_jedecnum = JEDEC_ATMEL;
14473                 tg3_flag_set(tp, NVRAM_BUFFERED);
14474                 tg3_flag_set(tp, FLASH);
14475                 tp->nvram_pagesize = 264;
14476                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14477                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14478                         tp->nvram_size = (protect ? 0x3e200 :
14479                                           TG3_NVRAM_SIZE_512KB);
14480                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14481                         tp->nvram_size = (protect ? 0x1f200 :
14482                                           TG3_NVRAM_SIZE_256KB);
14483                 else
14484                         tp->nvram_size = (protect ? 0x1f200 :
14485                                           TG3_NVRAM_SIZE_128KB);
14486                 break;
14487         case FLASH_5752VENDOR_ST_M45PE10:
14488         case FLASH_5752VENDOR_ST_M45PE20:
14489         case FLASH_5752VENDOR_ST_M45PE40:
14490                 tp->nvram_jedecnum = JEDEC_ST;
14491                 tg3_flag_set(tp, NVRAM_BUFFERED);
14492                 tg3_flag_set(tp, FLASH);
14493                 tp->nvram_pagesize = 256;
14494                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14495                         tp->nvram_size = (protect ?
14496                                           TG3_NVRAM_SIZE_64KB :
14497                                           TG3_NVRAM_SIZE_128KB);
14498                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14499                         tp->nvram_size = (protect ?
14500                                           TG3_NVRAM_SIZE_64KB :
14501                                           TG3_NVRAM_SIZE_256KB);
14502                 else
14503                         tp->nvram_size = (protect ?
14504                                           TG3_NVRAM_SIZE_128KB :
14505                                           TG3_NVRAM_SIZE_512KB);
14506                 break;
14507         }
14508 }
14509
14510 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14511 {
14512         u32 nvcfg1;
14513
14514         nvcfg1 = tr32(NVRAM_CFG1);
14515
14516         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14517         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14518         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14519         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14520         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14521                 tp->nvram_jedecnum = JEDEC_ATMEL;
14522                 tg3_flag_set(tp, NVRAM_BUFFERED);
14523                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14524
14525                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14526                 tw32(NVRAM_CFG1, nvcfg1);
14527                 break;
14528         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14529         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14530         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14531         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14532                 tp->nvram_jedecnum = JEDEC_ATMEL;
14533                 tg3_flag_set(tp, NVRAM_BUFFERED);
14534                 tg3_flag_set(tp, FLASH);
14535                 tp->nvram_pagesize = 264;
14536                 break;
14537         case FLASH_5752VENDOR_ST_M45PE10:
14538         case FLASH_5752VENDOR_ST_M45PE20:
14539         case FLASH_5752VENDOR_ST_M45PE40:
14540                 tp->nvram_jedecnum = JEDEC_ST;
14541                 tg3_flag_set(tp, NVRAM_BUFFERED);
14542                 tg3_flag_set(tp, FLASH);
14543                 tp->nvram_pagesize = 256;
14544                 break;
14545         }
14546 }
14547
14548 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14549 {
14550         u32 nvcfg1, protect = 0;
14551
14552         nvcfg1 = tr32(NVRAM_CFG1);
14553
14554         /* NVRAM protection for TPM */
14555         if (nvcfg1 & (1 << 27)) {
14556                 tg3_flag_set(tp, PROTECTED_NVRAM);
14557                 protect = 1;
14558         }
14559
14560         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14561         switch (nvcfg1) {
14562         case FLASH_5761VENDOR_ATMEL_ADB021D:
14563         case FLASH_5761VENDOR_ATMEL_ADB041D:
14564         case FLASH_5761VENDOR_ATMEL_ADB081D:
14565         case FLASH_5761VENDOR_ATMEL_ADB161D:
14566         case FLASH_5761VENDOR_ATMEL_MDB021D:
14567         case FLASH_5761VENDOR_ATMEL_MDB041D:
14568         case FLASH_5761VENDOR_ATMEL_MDB081D:
14569         case FLASH_5761VENDOR_ATMEL_MDB161D:
14570                 tp->nvram_jedecnum = JEDEC_ATMEL;
14571                 tg3_flag_set(tp, NVRAM_BUFFERED);
14572                 tg3_flag_set(tp, FLASH);
14573                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14574                 tp->nvram_pagesize = 256;
14575                 break;
14576         case FLASH_5761VENDOR_ST_A_M45PE20:
14577         case FLASH_5761VENDOR_ST_A_M45PE40:
14578         case FLASH_5761VENDOR_ST_A_M45PE80:
14579         case FLASH_5761VENDOR_ST_A_M45PE16:
14580         case FLASH_5761VENDOR_ST_M_M45PE20:
14581         case FLASH_5761VENDOR_ST_M_M45PE40:
14582         case FLASH_5761VENDOR_ST_M_M45PE80:
14583         case FLASH_5761VENDOR_ST_M_M45PE16:
14584                 tp->nvram_jedecnum = JEDEC_ST;
14585                 tg3_flag_set(tp, NVRAM_BUFFERED);
14586                 tg3_flag_set(tp, FLASH);
14587                 tp->nvram_pagesize = 256;
14588                 break;
14589         }
14590
14591         if (protect) {
14592                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14593         } else {
14594                 switch (nvcfg1) {
14595                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14596                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14597                 case FLASH_5761VENDOR_ST_A_M45PE16:
14598                 case FLASH_5761VENDOR_ST_M_M45PE16:
14599                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14600                         break;
14601                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14602                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14603                 case FLASH_5761VENDOR_ST_A_M45PE80:
14604                 case FLASH_5761VENDOR_ST_M_M45PE80:
14605                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14606                         break;
14607                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14608                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14609                 case FLASH_5761VENDOR_ST_A_M45PE40:
14610                 case FLASH_5761VENDOR_ST_M_M45PE40:
14611                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14612                         break;
14613                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14614                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14615                 case FLASH_5761VENDOR_ST_A_M45PE20:
14616                 case FLASH_5761VENDOR_ST_M_M45PE20:
14617                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14618                         break;
14619                 }
14620         }
14621 }
14622
14623 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14624 {
14625         tp->nvram_jedecnum = JEDEC_ATMEL;
14626         tg3_flag_set(tp, NVRAM_BUFFERED);
14627         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14628 }
14629
14630 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14631 {
14632         u32 nvcfg1;
14633
14634         nvcfg1 = tr32(NVRAM_CFG1);
14635
14636         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14637         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14638         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14639                 tp->nvram_jedecnum = JEDEC_ATMEL;
14640                 tg3_flag_set(tp, NVRAM_BUFFERED);
14641                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14642
14643                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14644                 tw32(NVRAM_CFG1, nvcfg1);
14645                 return;
14646         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14647         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14648         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14649         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14650         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14651         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14652         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14653                 tp->nvram_jedecnum = JEDEC_ATMEL;
14654                 tg3_flag_set(tp, NVRAM_BUFFERED);
14655                 tg3_flag_set(tp, FLASH);
14656
14657                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14658                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14659                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14660                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14661                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14662                         break;
14663                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14664                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14665                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14666                         break;
14667                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14668                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14669                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14670                         break;
14671                 }
14672                 break;
14673         case FLASH_5752VENDOR_ST_M45PE10:
14674         case FLASH_5752VENDOR_ST_M45PE20:
14675         case FLASH_5752VENDOR_ST_M45PE40:
14676                 tp->nvram_jedecnum = JEDEC_ST;
14677                 tg3_flag_set(tp, NVRAM_BUFFERED);
14678                 tg3_flag_set(tp, FLASH);
14679
14680                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14681                 case FLASH_5752VENDOR_ST_M45PE10:
14682                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14683                         break;
14684                 case FLASH_5752VENDOR_ST_M45PE20:
14685                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14686                         break;
14687                 case FLASH_5752VENDOR_ST_M45PE40:
14688                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14689                         break;
14690                 }
14691                 break;
14692         default:
14693                 tg3_flag_set(tp, NO_NVRAM);
14694                 return;
14695         }
14696
14697         tg3_nvram_get_pagesize(tp, nvcfg1);
14698         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14699                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14700 }
14701
14703 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14704 {
14705         u32 nvcfg1;
14706
14707         nvcfg1 = tr32(NVRAM_CFG1);
14708
14709         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14710         case FLASH_5717VENDOR_ATMEL_EEPROM:
14711         case FLASH_5717VENDOR_MICRO_EEPROM:
14712                 tp->nvram_jedecnum = JEDEC_ATMEL;
14713                 tg3_flag_set(tp, NVRAM_BUFFERED);
14714                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14715
14716                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14717                 tw32(NVRAM_CFG1, nvcfg1);
14718                 return;
14719         case FLASH_5717VENDOR_ATMEL_MDB011D:
14720         case FLASH_5717VENDOR_ATMEL_ADB011B:
14721         case FLASH_5717VENDOR_ATMEL_ADB011D:
14722         case FLASH_5717VENDOR_ATMEL_MDB021D:
14723         case FLASH_5717VENDOR_ATMEL_ADB021B:
14724         case FLASH_5717VENDOR_ATMEL_ADB021D:
14725         case FLASH_5717VENDOR_ATMEL_45USPT:
14726                 tp->nvram_jedecnum = JEDEC_ATMEL;
14727                 tg3_flag_set(tp, NVRAM_BUFFERED);
14728                 tg3_flag_set(tp, FLASH);
14729
14730                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14731                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14732                         /* Detect size with tg3_get_nvram_size() */
14733                         break;
14734                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14735                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14736                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14737                         break;
14738                 default:
14739                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14740                         break;
14741                 }
14742                 break;
14743         case FLASH_5717VENDOR_ST_M_M25PE10:
14744         case FLASH_5717VENDOR_ST_A_M25PE10:
14745         case FLASH_5717VENDOR_ST_M_M45PE10:
14746         case FLASH_5717VENDOR_ST_A_M45PE10:
14747         case FLASH_5717VENDOR_ST_M_M25PE20:
14748         case FLASH_5717VENDOR_ST_A_M25PE20:
14749         case FLASH_5717VENDOR_ST_M_M45PE20:
14750         case FLASH_5717VENDOR_ST_A_M45PE20:
14751         case FLASH_5717VENDOR_ST_25USPT:
14752         case FLASH_5717VENDOR_ST_45USPT:
14753                 tp->nvram_jedecnum = JEDEC_ST;
14754                 tg3_flag_set(tp, NVRAM_BUFFERED);
14755                 tg3_flag_set(tp, FLASH);
14756
14757                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14758                 case FLASH_5717VENDOR_ST_M_M25PE20:
14759                 case FLASH_5717VENDOR_ST_M_M45PE20:
14760                         /* Detect size with tg3_get_nvram_size() */
14761                         break;
14762                 case FLASH_5717VENDOR_ST_A_M25PE20:
14763                 case FLASH_5717VENDOR_ST_A_M45PE20:
14764                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14765                         break;
14766                 default:
14767                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14768                         break;
14769                 }
14770                 break;
14771         default:
14772                 tg3_flag_set(tp, NO_NVRAM);
14773                 return;
14774         }
14775
14776         tg3_nvram_get_pagesize(tp, nvcfg1);
14777         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14778                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14779 }
14780
14781 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14782 {
14783         u32 nvcfg1, nvmpinstrp, nv_status;
14784
14785         nvcfg1 = tr32(NVRAM_CFG1);
14786         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14787
14788         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14789                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14790                         tg3_flag_set(tp, NO_NVRAM);
14791                         return;
14792                 }
14793
14794                 switch (nvmpinstrp) {
14795                 case FLASH_5762_MX25L_100:
14796                 case FLASH_5762_MX25L_200:
14797                 case FLASH_5762_MX25L_400:
14798                 case FLASH_5762_MX25L_800:
14799                 case FLASH_5762_MX25L_160_320:
14800                         tp->nvram_pagesize = 4096;
14801                         tp->nvram_jedecnum = JEDEC_MACRONIX;
14802                         tg3_flag_set(tp, NVRAM_BUFFERED);
14803                         tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14804                         tg3_flag_set(tp, FLASH);
14805                         nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
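                        /* The expression below decodes the autosense status:
                         * the AUTOSENSE_DEVID field selects a power-of-two
                         * size, which AUTOSENSE_SIZE_IN_MB then scales up to
                         * bytes, i.e. nvram_size == (1 << devid) megabytes
                         * as far as the field layout here shows.
                         */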
14806                         tp->nvram_size =
14807                                 (1 << (nv_status >> AUTOSENSE_DEVID &
14808                                                 AUTOSENSE_DEVID_MASK)
14809                                         << AUTOSENSE_SIZE_IN_MB);
14810                         return;
14811
14812                 case FLASH_5762_EEPROM_HD:
14813                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14814                         break;
14815                 case FLASH_5762_EEPROM_LD:
14816                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14817                         break;
14818                 case FLASH_5720VENDOR_M_ST_M45PE20:
14819                         /* This pinstrap supports multiple sizes, so force it
14820                          * to read the actual size from location 0xf0.
14821                          */
14822                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14823                         break;
14824                 }
14825         }
14826
14827         switch (nvmpinstrp) {
14828         case FLASH_5720_EEPROM_HD:
14829         case FLASH_5720_EEPROM_LD:
14830                 tp->nvram_jedecnum = JEDEC_ATMEL;
14831                 tg3_flag_set(tp, NVRAM_BUFFERED);
14832
14833                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14834                 tw32(NVRAM_CFG1, nvcfg1);
14835                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14836                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14837                 else
14838                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14839                 return;
14840         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14841         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14842         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14843         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14844         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14845         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14846         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14847         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14848         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14849         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14850         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14851         case FLASH_5720VENDOR_ATMEL_45USPT:
14852                 tp->nvram_jedecnum = JEDEC_ATMEL;
14853                 tg3_flag_set(tp, NVRAM_BUFFERED);
14854                 tg3_flag_set(tp, FLASH);
14855
14856                 switch (nvmpinstrp) {
14857                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14858                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14859                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14860                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14861                         break;
14862                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14863                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14864                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14865                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14866                         break;
14867                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14868                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14869                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14870                         break;
14871                 default:
14872                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14873                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14874                         break;
14875                 }
14876                 break;
14877         case FLASH_5720VENDOR_M_ST_M25PE10:
14878         case FLASH_5720VENDOR_M_ST_M45PE10:
14879         case FLASH_5720VENDOR_A_ST_M25PE10:
14880         case FLASH_5720VENDOR_A_ST_M45PE10:
14881         case FLASH_5720VENDOR_M_ST_M25PE20:
14882         case FLASH_5720VENDOR_M_ST_M45PE20:
14883         case FLASH_5720VENDOR_A_ST_M25PE20:
14884         case FLASH_5720VENDOR_A_ST_M45PE20:
14885         case FLASH_5720VENDOR_M_ST_M25PE40:
14886         case FLASH_5720VENDOR_M_ST_M45PE40:
14887         case FLASH_5720VENDOR_A_ST_M25PE40:
14888         case FLASH_5720VENDOR_A_ST_M45PE40:
14889         case FLASH_5720VENDOR_M_ST_M25PE80:
14890         case FLASH_5720VENDOR_M_ST_M45PE80:
14891         case FLASH_5720VENDOR_A_ST_M25PE80:
14892         case FLASH_5720VENDOR_A_ST_M45PE80:
14893         case FLASH_5720VENDOR_ST_25USPT:
14894         case FLASH_5720VENDOR_ST_45USPT:
14895                 tp->nvram_jedecnum = JEDEC_ST;
14896                 tg3_flag_set(tp, NVRAM_BUFFERED);
14897                 tg3_flag_set(tp, FLASH);
14898
14899                 switch (nvmpinstrp) {
14900                 case FLASH_5720VENDOR_M_ST_M25PE20:
14901                 case FLASH_5720VENDOR_M_ST_M45PE20:
14902                 case FLASH_5720VENDOR_A_ST_M25PE20:
14903                 case FLASH_5720VENDOR_A_ST_M45PE20:
14904                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14905                         break;
14906                 case FLASH_5720VENDOR_M_ST_M25PE40:
14907                 case FLASH_5720VENDOR_M_ST_M45PE40:
14908                 case FLASH_5720VENDOR_A_ST_M25PE40:
14909                 case FLASH_5720VENDOR_A_ST_M45PE40:
14910                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14911                         break;
14912                 case FLASH_5720VENDOR_M_ST_M25PE80:
14913                 case FLASH_5720VENDOR_M_ST_M45PE80:
14914                 case FLASH_5720VENDOR_A_ST_M25PE80:
14915                 case FLASH_5720VENDOR_A_ST_M45PE80:
14916                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14917                         break;
14918                 default:
14919                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14920                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14921                         break;
14922                 }
14923                 break;
14924         default:
14925                 tg3_flag_set(tp, NO_NVRAM);
14926                 return;
14927         }
14928
14929         tg3_nvram_get_pagesize(tp, nvcfg1);
14930         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14931                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14932
14933         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14934                 u32 val;
14935
14936                 if (tg3_nvram_read(tp, 0, &val))
14937                         return;
14938
14939                 if (val != TG3_EEPROM_MAGIC &&
14940                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14941                         tg3_flag_set(tp, NO_NVRAM);
14942         }
14943 }
14944
14945 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14946 static void tg3_nvram_init(struct tg3 *tp)
14947 {
14948         if (tg3_flag(tp, IS_SSB_CORE)) {
14949                 /* There is no NVRAM or EEPROM on the SSB Broadcom GigE core. */
14950                 tg3_flag_clear(tp, NVRAM);
14951                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14952                 tg3_flag_set(tp, NO_NVRAM);
14953                 return;
14954         }
14955
14956         tw32_f(GRC_EEPROM_ADDR,
14957              (EEPROM_ADDR_FSM_RESET |
14958               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14959                EEPROM_ADDR_CLKPERD_SHIFT)));
14960
14961         msleep(1);
14962
14963         /* Enable serial EEPROM accesses. */
14964         tw32_f(GRC_LOCAL_CTRL,
14965              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14966         udelay(100);
14967
14968         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14969             tg3_asic_rev(tp) != ASIC_REV_5701) {
14970                 tg3_flag_set(tp, NVRAM);
14971
14972                 if (tg3_nvram_lock(tp)) {
14973                         netdev_warn(tp->dev,
14974                                     "Cannot get nvram lock, %s failed\n",
14975                                     __func__);
14976                         return;
14977                 }
14978                 tg3_enable_nvram_access(tp);
14979
14980                 tp->nvram_size = 0;
14981
14982                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14983                         tg3_get_5752_nvram_info(tp);
14984                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14985                         tg3_get_5755_nvram_info(tp);
14986                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14987                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14988                          tg3_asic_rev(tp) == ASIC_REV_5785)
14989                         tg3_get_5787_nvram_info(tp);
14990                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14991                         tg3_get_5761_nvram_info(tp);
14992                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14993                         tg3_get_5906_nvram_info(tp);
14994                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14995                          tg3_flag(tp, 57765_CLASS))
14996                         tg3_get_57780_nvram_info(tp);
14997                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14998                          tg3_asic_rev(tp) == ASIC_REV_5719)
14999                         tg3_get_5717_nvram_info(tp);
15000                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15001                          tg3_asic_rev(tp) == ASIC_REV_5762)
15002                         tg3_get_5720_nvram_info(tp);
15003                 else
15004                         tg3_get_nvram_info(tp);
15005
15006                 if (tp->nvram_size == 0)
15007                         tg3_get_nvram_size(tp);
15008
15009                 tg3_disable_nvram_access(tp);
15010                 tg3_nvram_unlock(tp);
15011
15012         } else {
15013                 tg3_flag_clear(tp, NVRAM);
15014                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15015
15016                 tg3_get_eeprom_size(tp);
15017         }
15018 }
15019
15020 struct subsys_tbl_ent {
15021         u16 subsys_vendor, subsys_devid;
15022         u32 phy_id;
15023 };
15024
15025 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15026         /* Broadcom boards. */
15027         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15028           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15029         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15030           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15031         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15032           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15033         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15034           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15035         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15036           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15037         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15038           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15039         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15040           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15041         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15042           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15043         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15044           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15045         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15046           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15047         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15048           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15049
15050         /* 3com boards. */
15051         { TG3PCI_SUBVENDOR_ID_3COM,
15052           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15053         { TG3PCI_SUBVENDOR_ID_3COM,
15054           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15055         { TG3PCI_SUBVENDOR_ID_3COM,
15056           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15057         { TG3PCI_SUBVENDOR_ID_3COM,
15058           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15059         { TG3PCI_SUBVENDOR_ID_3COM,
15060           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15061
15062         /* DELL boards. */
15063         { TG3PCI_SUBVENDOR_ID_DELL,
15064           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15065         { TG3PCI_SUBVENDOR_ID_DELL,
15066           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15067         { TG3PCI_SUBVENDOR_ID_DELL,
15068           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15069         { TG3PCI_SUBVENDOR_ID_DELL,
15070           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15071
15072         /* Compaq boards. */
15073         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15074           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15075         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15076           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15077         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15078           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15079         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15080           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15081         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15082           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15083
15084         /* IBM boards. */
15085         { TG3PCI_SUBVENDOR_ID_IBM,
15086           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15087 };
15088
15089 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15090 {
15091         int i;
15092
15093         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15094                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15095                      tp->pdev->subsystem_vendor) &&
15096                     (subsys_id_to_phy_id[i].subsys_devid ==
15097                      tp->pdev->subsystem_device))
15098                         return &subsys_id_to_phy_id[i];
15099         }
15100         return NULL;
15101 }
15102
15103 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15104 {
15105         u32 val;
15106
15107         tp->phy_id = TG3_PHY_ID_INVALID;
15108         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15109
15110         /* Assume an onboard, WOL-capable device by default. */
15111         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15112         tg3_flag_set(tp, WOL_CAP);
15113
15114         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15115                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15116                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15117                         tg3_flag_set(tp, IS_NIC);
15118                 }
15119                 val = tr32(VCPU_CFGSHDW);
15120                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15121                         tg3_flag_set(tp, ASPM_WORKAROUND);
15122                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15123                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15124                         tg3_flag_set(tp, WOL_ENABLE);
15125                         device_set_wakeup_enable(&tp->pdev->dev, true);
15126                 }
15127                 goto done;
15128         }
15129
15130         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15131         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15132                 u32 nic_cfg, led_cfg;
15133                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15134                 u32 nic_phy_id, ver, eeprom_phy_id;
15135                 int eeprom_phy_serdes = 0;
15136
15137                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15138                 tp->nic_sram_data_cfg = nic_cfg;
15139
15140                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15141                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15142                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15143                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15144                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15145                     (ver > 0) && (ver < 0x100))
15146                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15147
15148                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15149                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15150
15151                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15152                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15153                     tg3_asic_rev(tp) == ASIC_REV_5720)
15154                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15155
15156                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15157                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15158                         eeprom_phy_serdes = 1;
15159
15160                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15161                 if (nic_phy_id != 0) {
15162                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15163                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15164
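                        /* Reassemble the 32-bit PHY id from the two SRAM
                         * words: the upper 16 bits of id1 land in bits 10-25,
                         * the 0xfc00 bits of id2 in bits 26-31, and the low
                         * 10 bits of id2 in bits 0-9.
                         */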
15165                         eeprom_phy_id  = (id1 >> 16) << 10;
15166                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15167                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15168                 } else
15169                         eeprom_phy_id = 0;
15170
15171                 tp->phy_id = eeprom_phy_id;
15172                 if (eeprom_phy_serdes) {
15173                         if (!tg3_flag(tp, 5705_PLUS))
15174                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15175                         else
15176                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15177                 }
15178
15179                 if (tg3_flag(tp, 5750_PLUS))
15180                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15181                                     SHASTA_EXT_LED_MODE_MASK);
15182                 else
15183                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15184
15185                 switch (led_cfg) {
15186                 default:
15187                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15188                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15189                         break;
15190
15191                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15192                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15193                         break;
15194
15195                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15196                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15197
15198                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15199                          * read from some older 5700/5701 bootcode.
15200                          */
15201                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15202                             tg3_asic_rev(tp) == ASIC_REV_5701)
15203                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15204
15205                         break;
15206
15207                 case SHASTA_EXT_LED_SHARED:
15208                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15209                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15210                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15211                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15212                                                  LED_CTRL_MODE_PHY_2);
15213
15214                         if (tg3_flag(tp, 5717_PLUS) ||
15215                             tg3_asic_rev(tp) == ASIC_REV_5762)
15216                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15217                                                 LED_CTRL_BLINK_RATE_MASK;
15218
15219                         break;
15220
15221                 case SHASTA_EXT_LED_MAC:
15222                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15223                         break;
15224
15225                 case SHASTA_EXT_LED_COMBO:
15226                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15227                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15228                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15229                                                  LED_CTRL_MODE_PHY_2);
15230                         break;
15231
15232                 }
15233
15234                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15235                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15236                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15237                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15238
15239                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15240                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15241
15242                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15243                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15244                         if ((tp->pdev->subsystem_vendor ==
15245                              PCI_VENDOR_ID_ARIMA) &&
15246                             (tp->pdev->subsystem_device == 0x205a ||
15247                              tp->pdev->subsystem_device == 0x2063))
15248                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15249                 } else {
15250                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15251                         tg3_flag_set(tp, IS_NIC);
15252                 }
15253
15254                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15255                         tg3_flag_set(tp, ENABLE_ASF);
15256                         if (tg3_flag(tp, 5750_PLUS))
15257                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15258                 }
15259
15260                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15261                     tg3_flag(tp, 5750_PLUS))
15262                         tg3_flag_set(tp, ENABLE_APE);
15263
15264                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15265                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15266                         tg3_flag_clear(tp, WOL_CAP);
15267
15268                 if (tg3_flag(tp, WOL_CAP) &&
15269                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15270                         tg3_flag_set(tp, WOL_ENABLE);
15271                         device_set_wakeup_enable(&tp->pdev->dev, true);
15272                 }
15273
15274                 if (cfg2 & (1 << 17))
15275                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15276
15277                 /* SerDes signal pre-emphasis in register 0x590 is set
15278                  * by the bootcode if bit 18 is set. */
15279                 if (cfg2 & (1 << 18))
15280                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15281
15282                 if ((tg3_flag(tp, 57765_PLUS) ||
15283                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15284                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15285                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15286                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15287
15288                 if (tg3_flag(tp, PCI_EXPRESS)) {
15289                         u32 cfg3;
15290
15291                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15292                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15293                             !tg3_flag(tp, 57765_PLUS) &&
15294                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15295                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15296                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15297                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15298                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15299                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15300                 }
15301
15302                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15303                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15304                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15305                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15306                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15307                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15308
15309                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15310                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15311         }
15312 done:
15313         if (tg3_flag(tp, WOL_CAP))
15314                 device_set_wakeup_enable(&tp->pdev->dev,
15315                                          tg3_flag(tp, WOL_ENABLE));
15316         else
15317                 device_set_wakeup_capable(&tp->pdev->dev, false);
15318 }
15319
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int i, err;
        u32 val2, off = offset * 8;

        err = tg3_nvram_lock(tp);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
        tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
                        APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
        tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
        udelay(10);

        for (i = 0; i < 100; i++) {
                val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
                if (val2 & APE_OTP_STATUS_CMD_DONE) {
                        *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
                        break;
                }
                udelay(10);
        }

        tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

        tg3_nvram_unlock(tp);
        if (val2 & APE_OTP_STATUS_CMD_DONE)
                return 0;

        return -EBUSY;
}

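/* Issue a command to the GRC-mapped OTP controller and busy-wait for
 * it to signal completion.
 */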
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
        int i;
        u32 val;

        tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
        tw32(OTP_CTRL, cmd);

        /* Wait for up to 1 ms for command to execute. */
        for (i = 0; i < 100; i++) {
                val = tr32(OTP_STATUS);
                if (val & OTP_STATUS_CMD_DONE)
                        break;
                udelay(10);
        }

        return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
        u32 bhalf_otp, thalf_otp;

        tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
                return 0;

        tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
                return 0;

        thalf_otp = tr32(OTP_READ_DATA);

        tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
                return 0;

        bhalf_otp = tr32(OTP_READ_DATA);

        return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

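/* Seed the link configuration with every mode this PHY can advertise:
 * gigabit unless the PHY is 10/100-only, 10/100 only on copper (serdes
 * parts advertise FIBRE instead), with autonegotiation enabled and the
 * speed/duplex still unknown.
 */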
static void tg3_phy_init_link_config(struct tg3 *tp)
{
        u32 adv = ADVERTISED_Autoneg;

        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
                        adv |= ADVERTISED_1000baseT_Half;
                adv |= ADVERTISED_1000baseT_Full;
        }

        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                adv |= ADVERTISED_100baseT_Half |
                       ADVERTISED_100baseT_Full |
                       ADVERTISED_10baseT_Half |
                       ADVERTISED_10baseT_Full |
                       ADVERTISED_TP;
        else
                adv |= ADVERTISED_FIBRE;

        tp->link_config.advertising = adv;
        tp->link_config.speed = SPEED_UNKNOWN;
        tp->link_config.duplex = DUPLEX_UNKNOWN;
        tp->link_config.autoneg = AUTONEG_ENABLE;
        tp->link_config.active_speed = SPEED_UNKNOWN;
        tp->link_config.active_duplex = DUPLEX_UNKNOWN;

        tp->old_link = -1;
}

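/* Identify the PHY and set up the PHY-related flags.  When ASF/APE
 * firmware owns the PHY, the hardware ID registers are left alone;
 * otherwise the ID is read over MDIO and validated, with fallbacks to
 * the EEPROM-provided ID or the hard-coded subsystem table.
 */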
static int tg3_phy_probe(struct tg3 *tp)
{
        u32 hw_phy_id_1, hw_phy_id_2;
        u32 hw_phy_id, hw_phy_id_masked;
        int err;

        /* flow control autonegotiation is default behavior */
        tg3_flag_set(tp, PAUSE_AUTONEG);
        tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

        if (tg3_flag(tp, ENABLE_APE)) {
                switch (tp->pci_fn) {
                case 0:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
                        break;
                case 1:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
                        break;
                case 2:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
                        break;
                case 3:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
                        break;
                }
        }

        if (!tg3_flag(tp, ENABLE_ASF) &&
            !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
                                   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

        if (tg3_flag(tp, USE_PHYLIB))
                return tg3_phy_init(tp);

        /* Reading the PHY ID register can conflict with ASF
         * firmware access to the PHY hardware.
         */
        err = 0;
        if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
                hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
        } else {
                /* Now read the physical PHY_ID from the chip and verify
                 * that it is sane.  If it doesn't look good, fall back
                 * first to the hard-coded subsystem table PHY_ID and,
                 * failing that, to the value found in the eeprom area.
                 */
                err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
                err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

                hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
                hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
                hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

                hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
        }

        if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
                tp->phy_id = hw_phy_id;
                if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
                        tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                else
                        tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
        } else {
                if (tp->phy_id != TG3_PHY_ID_INVALID) {
                        /* Do nothing, phy ID already set up in
                         * tg3_get_eeprom_hw_cfg().
                         */
                } else {
                        struct subsys_tbl_ent *p;

                        /* No eeprom signature?  Try the hardcoded
                         * subsys device table.
                         */
                        p = tg3_lookup_by_subsys(tp);
                        if (p) {
                                tp->phy_id = p->phy_id;
                        } else if (!tg3_flag(tp, IS_SSB_CORE)) {
                                /* So far we have seen the IDs 0xbc050cd0,
                                 * 0xbc050f80 and 0xbc050c30 on devices
                                 * connected to a BCM4785, and there are
                                 * probably more.  Just assume that the phy
                                 * is supported when it is connected to an
                                 * SSB core for now.
                                 */
                                return -ENODEV;
                        }

                        if (!tp->phy_id ||
                            tp->phy_id == TG3_PHY_ID_BCM8002)
                                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                }
        }

        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            (tg3_asic_rev(tp) == ASIC_REV_5719 ||
             tg3_asic_rev(tp) == ASIC_REV_5720 ||
             tg3_asic_rev(tp) == ASIC_REV_57766 ||
             tg3_asic_rev(tp) == ASIC_REV_5762 ||
             (tg3_asic_rev(tp) == ASIC_REV_5717 &&
              tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
             (tg3_asic_rev(tp) == ASIC_REV_57765 &&
              tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
                tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

                tp->eee.supported = SUPPORTED_100baseT_Full |
                                    SUPPORTED_1000baseT_Full;
                tp->eee.advertised = ADVERTISED_100baseT_Full |
                                     ADVERTISED_1000baseT_Full;
                tp->eee.eee_enabled = 1;
                tp->eee.tx_lpi_enabled = 1;
                tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
        }

        tg3_phy_init_link_config(tp);

        if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
            !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            !tg3_flag(tp, ENABLE_APE) &&
            !tg3_flag(tp, ENABLE_ASF)) {
                u32 bmsr, dummy;

                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        goto skip_phy_reset;

                err = tg3_phy_reset(tp);
                if (err)
                        return err;

                tg3_phy_set_wirespeed(tp);

                if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
                        tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
                                            tp->link_config.flowctrl);

                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);
                }
        }

skip_phy_reset:
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                err = tg3_init_5401phy_dsp(tp);
                if (err)
                        return err;

                err = tg3_init_5401phy_dsp(tp);
        }

        return err;
}

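/* Extract the board part number from the PCI VPD.  On boards whose VPD
 * manufacturer ID is "1028" (which matches Dell's PCI vendor ID), the
 * VENDOR0 keyword is also used to seed the firmware version string.
 * If no usable VPD exists, fall back to a part number derived from the
 * PCI device ID.
 */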
static void tg3_read_vpd(struct tg3 *tp)
{
        u8 *vpd_data;
        unsigned int len, vpdlen;
        int i;

        vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
        if (!vpd_data)
                goto out_no_vpd;

        i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
                                         PCI_VPD_RO_KEYWORD_MFR_ID, &len);
        if (i < 0)
                goto partno;

        if (len != 4 || memcmp(vpd_data + i, "1028", 4))
                goto partno;

        i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
                                         PCI_VPD_RO_KEYWORD_VENDOR0, &len);
        if (i < 0)
                goto partno;

        memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
        snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);

partno:
        i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
                                         PCI_VPD_RO_KEYWORD_PARTNO, &len);
        if (i < 0)
                goto out_not_found;

        if (len > TG3_BPN_SIZE)
                goto out_not_found;

        memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
        kfree(vpd_data);
        if (tp->board_part_number[0])
                return;

out_no_vpd:
        if (tg3_asic_rev(tp) == ASIC_REV_5717) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
                        strcpy(tp->board_part_number, "BCM5717");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
                        strcpy(tp->board_part_number, "BCM5718");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
                        strcpy(tp->board_part_number, "BCM57780");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
                        strcpy(tp->board_part_number, "BCM57760");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
                        strcpy(tp->board_part_number, "BCM57790");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
                        strcpy(tp->board_part_number, "BCM57788");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
                        strcpy(tp->board_part_number, "BCM57761");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
                        strcpy(tp->board_part_number, "BCM57765");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
                        strcpy(tp->board_part_number, "BCM57781");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
                        strcpy(tp->board_part_number, "BCM57785");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
                        strcpy(tp->board_part_number, "BCM57791");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
                        strcpy(tp->board_part_number, "BCM57795");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
                        strcpy(tp->board_part_number, "BCM57762");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
                        strcpy(tp->board_part_number, "BCM57766");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
                        strcpy(tp->board_part_number, "BCM57782");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
                        strcpy(tp->board_part_number, "BCM57786");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                strcpy(tp->board_part_number, "BCM95906");
        } else {
nomatch:
                strcpy(tp->board_part_number, "none");
        }
}

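/* Sanity-check a firmware image header in NVRAM: the first word must
 * carry the 0x0c000000 signature in its upper bits and the second word
 * must be zero.
 */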
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
        u32 val;

        if (tg3_nvram_read(tp, offset, &val) ||
            (val & 0xfc000000) != 0x0c000000 ||
            tg3_nvram_read(tp, offset + 4, &val) ||
            val != 0)
                return 0;

        return 1;
}

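/* Append the bootcode version to fw_ver.  Newer images carry a 16-byte
 * ASCII version string at an offset named in the image header; older
 * images only provide packed major/minor fields, which are formatted
 * as "vM.mm" instead.
 */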
static void tg3_read_bc_ver(struct tg3 *tp)
{
        u32 val, offset, start, ver_offset;
        int i, dst_off;
        bool newver = false;

        if (tg3_nvram_read(tp, 0xc, &offset) ||
            tg3_nvram_read(tp, 0x4, &start))
                return;

        offset = tg3_nvram_logical_addr(tp, offset);

        if (tg3_nvram_read(tp, offset, &val))
                return;

        if ((val & 0xfc000000) == 0x0c000000) {
                if (tg3_nvram_read(tp, offset + 4, &val))
                        return;

                if (val == 0)
                        newver = true;
        }

        dst_off = strlen(tp->fw_ver);

        if (newver) {
                if (TG3_VER_SIZE - dst_off < 16 ||
                    tg3_nvram_read(tp, offset + 8, &ver_offset))
                        return;

                offset = offset + ver_offset - start;
                for (i = 0; i < 16; i += 4) {
                        __be32 v;
                        if (tg3_nvram_read_be32(tp, offset + i, &v))
                                return;

                        memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
                }
        } else {
                u32 major, minor;

                if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
                        return;

                major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
                        TG3_NVM_BCVER_MAJSFT;
                minor = ver_offset & TG3_NVM_BCVER_MINMSK;
                snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
                         "v%d.%02d", major, minor);
        }
}

static void tg3_read_hwsb_ver(struct tg3 *tp)
{
        u32 val, major, minor;

        /* Use native endian representation */
        if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
                return;

        major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
                TG3_NVM_HWSB_CFG1_MAJSFT;
        minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
                TG3_NVM_HWSB_CFG1_MINSFT;

        snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

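/* Append the selfboot ("sb") version.  The EEPROM-data-header offset
 * depends on the selfboot format revision; the version is encoded as
 * major/minor plus an optional build letter ('a' for build 1, and so
 * on).
 */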
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
        u32 offset, major, minor, build;

        strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

        if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
                return;

        switch (val & TG3_EEPROM_SB_REVISION_MASK) {
        case TG3_EEPROM_SB_REVISION_0:
                offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_2:
                offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_3:
                offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_4:
                offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_5:
                offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_6:
                offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
                break;
        default:
                return;
        }

        if (tg3_nvram_read(tp, offset, &val))
                return;

        build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
                TG3_EEPROM_SB_EDH_BLD_SHFT;
        major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
                TG3_EEPROM_SB_EDH_MAJ_SHFT;
        minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

        if (minor > 99 || build > 26)
                return;

        offset = strlen(tp->fw_ver);
        snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
                 " v%d.%02d", major, minor);

        if (build > 0) {
                offset = strlen(tp->fw_ver);
                if (offset < TG3_VER_SIZE - 1)
                        tp->fw_ver[offset] = 'a' + build - 1;
        }
}

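/* Append the management firmware (ASF) version.  Walk the NVRAM
 * directory for the ASF INI entry, validate the image it points at,
 * then copy up to 16 bytes of its embedded version string.
 */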
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
        u32 val, offset, start;
        int i, vlen;

        for (offset = TG3_NVM_DIR_START;
             offset < TG3_NVM_DIR_END;
             offset += TG3_NVM_DIRENT_SIZE) {
                if (tg3_nvram_read(tp, offset, &val))
                        return;

                if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
                        break;
        }

        if (offset == TG3_NVM_DIR_END)
                return;

        if (!tg3_flag(tp, 5705_PLUS))
                start = 0x08000000;
        else if (tg3_nvram_read(tp, offset - 4, &start))
                return;

        if (tg3_nvram_read(tp, offset + 4, &offset) ||
            !tg3_fw_img_is_valid(tp, offset) ||
            tg3_nvram_read(tp, offset + 8, &val))
                return;

        offset += val - start;

        vlen = strlen(tp->fw_ver);

        tp->fw_ver[vlen++] = ',';
        tp->fw_ver[vlen++] = ' ';

        for (i = 0; i < 4; i++) {
                __be32 v;
                if (tg3_nvram_read_be32(tp, offset, &v))
                        return;

                offset += sizeof(v);

                if (vlen > TG3_VER_SIZE - sizeof(v)) {
                        memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
                        break;
                }

                memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
                vlen += sizeof(v);
        }
}

static void tg3_probe_ncsi(struct tg3 *tp)
{
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
                tg3_flag_set(tp, APE_HAS_NCSI);
}

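/* Append the APE firmware version, labelled NCSI, SMASH (5725) or DASH
 * depending on what the management firmware reports.
 */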
static void tg3_read_dash_ver(struct tg3 *tp)
{
        int vlen;
        u32 apedata;
        char *fwtype;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

        if (tg3_flag(tp, APE_HAS_NCSI))
                fwtype = "NCSI";
        else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
                fwtype = "SMASH";
        else
                fwtype = "DASH";

        vlen = strlen(tp->fw_ver);

        snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
                 fwtype,
                 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
                 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
                 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
                 (apedata & APE_FW_VERSION_BLDMSK));
}

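/* 5762 only: the version is packed into OTP words starting at
 * OTP_ADDRESS_MAGIC0.  The loop below treats them as a short byte
 * string and keeps the last non-zero byte seen (before a zero byte, or
 * at most seven bytes in) as the version number.
 */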
static void tg3_read_otp_ver(struct tg3 *tp)
{
        u32 val, val2;

        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                return;

        if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
            !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
            TG3_OTP_MAGIC0_VALID(val)) {
                u64 val64 = (u64) val << 32 | val2;
                u32 ver = 0;
                int i, vlen;

                for (i = 0; i < 7; i++) {
                        if ((val64 & 0xff) == 0)
                                break;
                        ver = val64 & 0xff;
                        val64 >>= 8;
                }
                vlen = strlen(tp->fw_ver);
                snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
        }
}

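/* Build the complete fw_ver string.  The NVRAM magic selects which
 * format (bootcode, selfboot or hardware selfboot) supplies the base
 * version; ASF/APE firmware versions are appended afterwards unless a
 * VPD-supplied version is already present.
 */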
static void tg3_read_fw_ver(struct tg3 *tp)
{
        u32 val;
        bool vpd_vers = false;

        if (tp->fw_ver[0] != 0)
                vpd_vers = true;

        if (tg3_flag(tp, NO_NVRAM)) {
                strcat(tp->fw_ver, "sb");
                tg3_read_otp_ver(tp);
                return;
        }

        if (tg3_nvram_read(tp, 0, &val))
                return;

        if (val == TG3_EEPROM_MAGIC)
                tg3_read_bc_ver(tp);
        else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
                tg3_read_sb_ver(tp, val);
        else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                tg3_read_hwsb_ver(tp);

        if (tg3_flag(tp, ENABLE_ASF)) {
                if (tg3_flag(tp, ENABLE_APE)) {
                        tg3_probe_ncsi(tp);
                        if (!vpd_vers)
                                tg3_read_dash_ver(tp);
                } else if (!vpd_vers) {
                        tg3_read_mgmtfw_ver(tp);
                }
        }

        tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
        if (tg3_flag(tp, LRG_PROD_RING_CAP))
                return TG3_RX_RET_MAX_SIZE_5717;
        else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
                return TG3_RX_RET_MAX_SIZE_5700;
        else
                return TG3_RX_RET_MAX_SIZE_5705;
}

static const struct pci_device_id tg3_write_reorder_chipsets[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
        { },
};

static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
        struct pci_dev *peer;
        unsigned int func, devnr = tp->pdev->devfn & ~7;

        for (func = 0; func < 8; func++) {
                peer = pci_get_slot(tp->pdev->bus, devnr | func);
                if (peer && peer != tp->pdev)
                        break;
                pci_dev_put(peer);
        }
        /* 5704 can be configured in single-port mode; set peer to
         * tp->pdev in that case.
         */
        if (!peer) {
                peer = tp->pdev;
                return peer;
        }

        /*
         * We don't need to keep the refcount elevated; there's no way
         * to remove one half of this device without removing the other.
         */
        pci_dev_put(peer);

        return peer;
}

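/* Determine the chip revision ID.  It normally lives in the upper bits
 * of MISC_HOST_CTRL, but newer devices report a sentinel value there
 * and put the real revision in a product-ID config register instead.
 * Once known, derive the cumulative family flags (5705_PLUS,
 * 5750_PLUS, 5780_CLASS, ...) used throughout the driver.
 */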
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
        tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
        if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
                u32 reg;

                /* All devices that use the alternate
                 * ASIC REV location have a CPMU.
                 */
                tg3_flag_set(tp, CPMU_PRESENT);

                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
                        reg = TG3PCI_GEN2_PRODID_ASICREV;
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
                        reg = TG3PCI_GEN15_PRODID_ASICREV;
                else
                        reg = TG3PCI_PRODID_ASICREV;

                pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
        }

        /* Wrong chip ID in 5752 A0. This code can be removed later
         * as A0 is not in production.
         */
        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
                tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
                tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720)
                tg3_flag_set(tp, 5717_PLUS);

        if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
            tg3_asic_rev(tp) == ASIC_REV_57766)
                tg3_flag_set(tp, 57765_CLASS);

        if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
            tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, 57765_PLUS);

        /* Intentionally exclude ASIC_REV_5906 */
        if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
            tg3_asic_rev(tp) == ASIC_REV_5787 ||
            tg3_asic_rev(tp) == ASIC_REV_5784 ||
            tg3_asic_rev(tp) == ASIC_REV_5761 ||
            tg3_asic_rev(tp) == ASIC_REV_5785 ||
            tg3_asic_rev(tp) == ASIC_REV_57780 ||
            tg3_flag(tp, 57765_PLUS))
                tg3_flag_set(tp, 5755_PLUS);

        if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
            tg3_asic_rev(tp) == ASIC_REV_5714)
                tg3_flag_set(tp, 5780_CLASS);

        if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
            tg3_asic_rev(tp) == ASIC_REV_5752 ||
            tg3_asic_rev(tp) == ASIC_REV_5906 ||
            tg3_flag(tp, 5755_PLUS) ||
            tg3_flag(tp, 5780_CLASS))
                tg3_flag_set(tp, 5750_PLUS);

        if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
            tg3_flag(tp, 5750_PLUS))
                tg3_flag_set(tp, 5705_PLUS);
}

static bool tg3_10_100_only_device(struct tg3 *tp,
                                   const struct pci_device_id *ent)
{
        u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

        if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
             (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
            (tp->phy_flags & TG3_PHYFLG_IS_FET))
                return true;

        if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
                if (tg3_asic_rev(tp) == ASIC_REV_5705) {
                        if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
                                return true;
                } else {
                        return true;
                }
        }

        return false;
}

static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
        u32 misc_ctrl_reg;
        u32 pci_state_reg, grc_misc_cfg;
        u32 val;
        u16 pci_cmd;
        int err;

        /* Force memory write invalidate off.  If we leave it on,
         * then on 5700_BX chips we have to enable a workaround.
         * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
         * to match the cacheline size.  The Broadcom driver has this
         * workaround but turns MWI off all the time, so it never uses
         * it.  This seems to suggest that the workaround is insufficient.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_cmd &= ~PCI_COMMAND_INVALIDATE;
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        /* Important! -- Make sure register accesses are byteswapped
         * correctly.  Also, for those chips that require it, make
         * sure that indirect register accesses are enabled before
         * the first operation.
         */
        pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                              &misc_ctrl_reg);
        tp->misc_host_ctrl |= (misc_ctrl_reg &
                               MISC_HOST_CTRL_CHIPREV);
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        tg3_detect_asic_rev(tp, misc_ctrl_reg);

        /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
         * we need to disable memory and use config. cycles
         * only to access all registers. The 5702/03 chips
         * can mistakenly decode the special cycles from the
         * ICH chipsets as memory write cycles, causing corruption
         * of register and memory space. Only certain ICH bridges
         * will drive special cycles with non-zero data during the
         * address phase which can fall within the 5703's address
         * range. This is not an ICH bug as the PCI spec allows
         * non-zero address during special cycles. However, only
         * these ICH bridges are known to drive non-zero addresses
         * during special cycles.
         *
         * Since special cycles do not cross PCI bridges, we only
         * enable this workaround if the 5703 is on the secondary
         * bus of these ICH bridges.
         */
        if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
            (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
                static struct tg3_dev_id {
                        u32     vendor;
                        u32     device;
                        u32     rev;
                } ich_chipsets[] = {
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
                          PCI_ANY_ID },
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
                          PCI_ANY_ID },
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
                          0xa },
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
                          PCI_ANY_ID },
                        { },
                };
                struct tg3_dev_id *pci_id = &ich_chipsets[0];
                struct pci_dev *bridge = NULL;

                while (pci_id->vendor != 0) {
                        bridge = pci_get_device(pci_id->vendor, pci_id->device,
                                                bridge);
                        if (!bridge) {
                                pci_id++;
                                continue;
                        }
                        if (pci_id->rev != PCI_ANY_ID) {
                                if (bridge->revision > pci_id->rev)
                                        continue;
                        }
                        if (bridge->subordinate &&
                            (bridge->subordinate->number ==
                             tp->pdev->bus->number)) {
                                tg3_flag_set(tp, ICH_WORKAROUND);
                                pci_dev_put(bridge);
                                break;
                        }
                }
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5701) {
                static struct tg3_dev_id {
                        u32     vendor;
                        u32     device;
                } bridge_chipsets[] = {
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
                        { },
                };
                struct tg3_dev_id *pci_id = &bridge_chipsets[0];
                struct pci_dev *bridge = NULL;

                while (pci_id->vendor != 0) {
                        bridge = pci_get_device(pci_id->vendor,
                                                pci_id->device,
                                                bridge);
                        if (!bridge) {
                                pci_id++;
                                continue;
                        }
                        if (bridge->subordinate &&
                            (bridge->subordinate->number <=
                             tp->pdev->bus->number) &&
                            (bridge->subordinate->busn_res.end >=
                             tp->pdev->bus->number)) {
                                tg3_flag_set(tp, 5701_DMA_BUG);
                                pci_dev_put(bridge);
                                break;
                        }
                }
        }

        /* The EPB bridge inside 5714, 5715, and 5780 cannot support
         * DMA addresses > 40-bit. This bridge may have other additional
         * 57xx devices behind it in some 4-port NIC designs for example.
         * Any tg3 device found behind the bridge will also need the 40-bit
         * DMA workaround.
         */
        if (tg3_flag(tp, 5780_CLASS)) {
                tg3_flag_set(tp, 40BIT_DMA_BUG);
                tp->msi_cap = tp->pdev->msi_cap;
        } else {
                struct pci_dev *bridge = NULL;

                do {
                        bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
                                                PCI_DEVICE_ID_SERVERWORKS_EPB,
                                                bridge);
                        if (bridge && bridge->subordinate &&
                            (bridge->subordinate->number <=
                             tp->pdev->bus->number) &&
                            (bridge->subordinate->busn_res.end >=
                             tp->pdev->bus->number)) {
                                tg3_flag_set(tp, 40BIT_DMA_BUG);
                                pci_dev_put(bridge);
                                break;
                        }
                } while (bridge);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
            tg3_asic_rev(tp) == ASIC_REV_5714)
                tp->pdev_peer = tg3_find_peer(tp);

        /* Determine TSO capabilities */
        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
                ; /* Do nothing. HW bug. */
        else if (tg3_flag(tp, 57765_PLUS))
                tg3_flag_set(tp, HW_TSO_3);
        else if (tg3_flag(tp, 5755_PLUS) ||
                 tg3_asic_rev(tp) == ASIC_REV_5906)
                tg3_flag_set(tp, HW_TSO_2);
        else if (tg3_flag(tp, 5750_PLUS)) {
                tg3_flag_set(tp, HW_TSO_1);
                tg3_flag_set(tp, TSO_BUG);
                if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
                    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
                        tg3_flag_clear(tp, TSO_BUG);
        } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
                   tg3_asic_rev(tp) != ASIC_REV_5701 &&
                   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
                tg3_flag_set(tp, FW_TSO);
                tg3_flag_set(tp, TSO_BUG);
                if (tg3_asic_rev(tp) == ASIC_REV_5705)
                        tp->fw_needed = FIRMWARE_TG3TSO5;
                else
                        tp->fw_needed = FIRMWARE_TG3TSO;
        }

        /* Selectively allow TSO based on operating conditions */
        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3) ||
            tg3_flag(tp, FW_TSO)) {
                /* For firmware TSO, assume ASF is disabled.
                 * We'll disable TSO later if we discover ASF
                 * is enabled in tg3_get_eeprom_hw_cfg().
                 */
                tg3_flag_set(tp, TSO_CAPABLE);
        } else {
                tg3_flag_clear(tp, TSO_CAPABLE);
                tg3_flag_clear(tp, TSO_BUG);
                tp->fw_needed = NULL;
        }

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
                tp->fw_needed = FIRMWARE_TG3;

        if (tg3_asic_rev(tp) == ASIC_REV_57766)
                tp->fw_needed = FIRMWARE_TG357766;

        tp->irq_max = 1;

        if (tg3_flag(tp, 5750_PLUS)) {
                tg3_flag_set(tp, SUPPORT_MSI);
                if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
                    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
                    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
                     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
                     tp->pdev_peer == tp->pdev))
                        tg3_flag_clear(tp, SUPPORT_MSI);

                if (tg3_flag(tp, 5755_PLUS) ||
                    tg3_asic_rev(tp) == ASIC_REV_5906) {
                        tg3_flag_set(tp, 1SHOT_MSI);
                }

                if (tg3_flag(tp, 57765_PLUS)) {
                        tg3_flag_set(tp, SUPPORT_MSIX);
                        tp->irq_max = TG3_IRQ_MAX_VECS;
                }
        }

        tp->txq_max = 1;
        tp->rxq_max = 1;
        if (tp->irq_max > 1) {
                tp->rxq_max = TG3_RSS_MAX_NUM_QS;
                tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

                if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
                    tg3_asic_rev(tp) == ASIC_REV_5720)
                        tp->txq_max = tp->irq_max - 1;
        }

        if (tg3_flag(tp, 5755_PLUS) ||
            tg3_asic_rev(tp) == ASIC_REV_5906)
                tg3_flag_set(tp, SHORT_DMA_BUG);

        if (tg3_asic_rev(tp) == ASIC_REV_5719)
                tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720 ||
            tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, LRG_PROD_RING_CAP);

        if (tg3_flag(tp, 57765_PLUS) &&
            tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
                tg3_flag_set(tp, USE_JUMBO_BDFLAG);

        if (!tg3_flag(tp, 5705_PLUS) ||
            tg3_flag(tp, 5780_CLASS) ||
            tg3_flag(tp, USE_JUMBO_BDFLAG))
                tg3_flag_set(tp, JUMBO_CAPABLE);

        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
                              &pci_state_reg);

        if (pci_is_pcie(tp->pdev)) {
                u16 lnkctl;

                tg3_flag_set(tp, PCI_EXPRESS);

                pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
                if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
                        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                                tg3_flag_clear(tp, HW_TSO_2);
                                tg3_flag_clear(tp, TSO_CAPABLE);
                        }
                        if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
                            tg3_asic_rev(tp) == ASIC_REV_5761 ||
                            tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
                            tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
                                tg3_flag_set(tp, CLKREQ_BUG);
                } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
                        tg3_flag_set(tp, L1PLLPD_EN);
                }
        } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
                /* BCM5785 devices are effectively PCIe devices, and should
                 * follow PCIe codepaths, but do not have a PCIe capabilities
                 * section.
                 */
                tg3_flag_set(tp, PCI_EXPRESS);
        } else if (!tg3_flag(tp, 5705_PLUS) ||
                   tg3_flag(tp, 5780_CLASS)) {
                tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
                if (!tp->pcix_cap) {
                        dev_err(&tp->pdev->dev,
                                "Cannot find PCI-X capability, aborting\n");
                        return -EIO;
                }

                if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
                        tg3_flag_set(tp, PCIX_MODE);
        }

        /* If we have an AMD 762 or VIA K8T800 chipset, write
         * reordering to the mailbox registers done by the host
         * controller can cause major troubles.  We read back from
         * every mailbox register write to force the writes to be
         * posted to the chip in order.
         */
        if (pci_dev_present(tg3_write_reorder_chipsets) &&
            !tg3_flag(tp, PCI_EXPRESS))
                tg3_flag_set(tp, MBOX_WRITE_REORDER);

        pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                             &tp->pci_cacheline_sz);
        pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                             &tp->pci_lat_timer);
        if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
            tp->pci_lat_timer < 64) {
                tp->pci_lat_timer = 64;
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Important! -- It is critical that the PCI-X hw workaround
         * situation is decided before the first MMIO register access.
         */
        if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
                /* 5700 BX chips need to have their TX producer index
                 * mailboxes written twice to workaround a bug.
                 */
                tg3_flag_set(tp, TXD_MBOX_HWBUG);

                /* If we are in PCI-X mode, enable register write workaround.
                 *
                 * The workaround is to use indirect register accesses
                 * for all chip writes not to mailbox registers.
                 */
                if (tg3_flag(tp, PCIX_MODE)) {
                        u32 pm_reg;

                        tg3_flag_set(tp, PCIX_TARGET_HWBUG);

                        /* The chip can have its power management PCI config
                         * space registers clobbered due to this bug.
                         * So explicitly force the chip into D0 here.
                         */
                        pci_read_config_dword(tp->pdev,
                                              tp->pdev->pm_cap + PCI_PM_CTRL,
                                              &pm_reg);
                        pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
                        pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
                        pci_write_config_dword(tp->pdev,
                                               tp->pdev->pm_cap + PCI_PM_CTRL,
                                               pm_reg);

                        /* Also, force SERR#/PERR# in PCI command. */
                        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
                        pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
                        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
                }
        }

        if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
                tg3_flag_set(tp, PCI_HIGH_SPEED);
        if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
                tg3_flag_set(tp, PCI_32BIT);

        /* Chip-specific fixup from Broadcom driver */
        if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
            (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
                pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
                pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
        }

        /* Default fast path register access methods */
        tp->read32 = tg3_read32;
        tp->write32 = tg3_write32;
        tp->read32_mbox = tg3_read32;
        tp->write32_mbox = tg3_write32;
        tp->write32_tx_mbox = tg3_write32;
        tp->write32_rx_mbox = tg3_write32;

        /* Various workaround register access methods */
        if (tg3_flag(tp, PCIX_TARGET_HWBUG))
                tp->write32 = tg3_write_indirect_reg32;
        else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
                 (tg3_flag(tp, PCI_EXPRESS) &&
                  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
                /*
                 * Back to back register writes can cause problems on these
                 * chips; the workaround is to read back all reg writes
                 * except those to mailbox regs.
                 *
                 * See tg3_write_indirect_reg32().
                 */
                tp->write32 = tg3_write_flush_reg32;
        }

        if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                if (tg3_flag(tp, MBOX_WRITE_REORDER))
                        tp->write32_rx_mbox = tg3_write_flush_reg32;
        }

        if (tg3_flag(tp, ICH_WORKAROUND)) {
                tp->read32 = tg3_read_indirect_reg32;
                tp->write32 = tg3_write_indirect_reg32;
                tp->read32_mbox = tg3_read_indirect_mbox;
                tp->write32_mbox = tg3_write_indirect_mbox;
                tp->write32_tx_mbox = tg3_write_indirect_mbox;
                tp->write32_rx_mbox = tg3_write_indirect_mbox;

                iounmap(tp->regs);
                tp->regs = NULL;

                pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
                pci_cmd &= ~PCI_COMMAND_MEMORY;
                pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
        }
        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                tp->read32_mbox = tg3_read32_mbox_5906;
                tp->write32_mbox = tg3_write32_mbox_5906;
                tp->write32_tx_mbox = tg3_write32_mbox_5906;
                tp->write32_rx_mbox = tg3_write32_mbox_5906;
        }

        if (tp->write32 == tg3_write_indirect_reg32 ||
            (tg3_flag(tp, PCIX_MODE) &&
             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
              tg3_asic_rev(tp) == ASIC_REV_5701)))
                tg3_flag_set(tp, SRAM_USE_CONFIG);

        /* The memory arbiter has to be enabled in order for SRAM accesses
         * to succeed.  Normally on powerup the tg3 chip firmware will make
         * sure it is enabled, but other entities such as system netboot
         * code might disable it.
         */
        val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

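        /* Work out which PCI function this port really is.  devfn gives
         * the default, but on 5704 and 5780-class parts in PCI-X mode
         * the function number comes from PCI_X_STATUS, and on
         * 5717/5719/5720 it is reported through the CPMU status.
         */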
16564         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16565         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16566             tg3_flag(tp, 5780_CLASS)) {
16567                 if (tg3_flag(tp, PCIX_MODE)) {
16568                         pci_read_config_dword(tp->pdev,
16569                                               tp->pcix_cap + PCI_X_STATUS,
16570                                               &val);
16571                         tp->pci_fn = val & 0x7;
16572                 }
16573         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16574                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16575                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16576                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16577                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16578                         val = tr32(TG3_CPMU_STATUS);
16579
16580                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16581                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16582                 else
16583                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16584                                      TG3_CPMU_STATUS_FSHFT_5719;
16585         }
16586
16587         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16588                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16589                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16590         }
16591
16592         /* Get eeprom hw config before calling tg3_set_power_state().
16593          * In particular, the TG3_FLAG_IS_NIC flag must be
16594          * determined before calling tg3_set_power_state() so that
16595          * we know whether or not to switch out of Vaux power.
16596          * When the flag is set, it means that GPIO1 is used for eeprom
16597          * write protect and also implies that it is a LOM where GPIOs
16598          * are not used to switch power.
16599          */
16600         tg3_get_eeprom_hw_cfg(tp);
16601
16602         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16603                 tg3_flag_clear(tp, TSO_CAPABLE);
16604                 tg3_flag_clear(tp, TSO_BUG);
16605                 tp->fw_needed = NULL;
16606         }
16607
16608         if (tg3_flag(tp, ENABLE_APE)) {
16609                 /* Allow reads and writes to the
16610                  * APE register and memory space.
16611                  */
16612                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16613                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16614                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16615                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16616                                        pci_state_reg);
16617
16618                 tg3_ape_lock_init(tp);
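                /* The APE firmware watches for a periodic host heartbeat;
                 * the timer path refreshes it roughly every ape_hb_interval
                 * jiffies.
                 */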
16619                 tp->ape_hb_interval =
16620                         msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16621         }
16622
16623         /* Set up tp->grc_local_ctrl before calling
16624          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16625          * will bring 5700's external PHY out of reset.
16626          * It is also used as eeprom write protect on LOMs.
16627          */
16628         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16629         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16630             tg3_flag(tp, EEPROM_WRITE_PROT))
16631                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16632                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16633         /* Unused GPIO3 must be driven as output on 5752 because there
16634          * are no pull-up resistors on unused GPIO pins.
16635          */
16636         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16637                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16638
16639         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16640             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16641             tg3_flag(tp, 57765_CLASS))
16642                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16643
16644         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16645             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16646                 /* Turn off the debug UART. */
16647                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16648                 if (tg3_flag(tp, IS_NIC))
16649                         /* Keep VMain power. */
16650                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16651                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16652         }
16653
16654         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16655                 tp->grc_local_ctrl |=
16656                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16657
16658         /* Switch out of Vaux if it is a NIC */
16659         tg3_pwrsrc_switch_to_vmain(tp);
16660
16661         /* Derive initial jumbo mode from MTU assigned in
16662          * ether_setup() via the alloc_etherdev() call
16663          */
16664         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16665                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16666
16667         /* Determine WakeOnLan speed to use. */
16668         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16669             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16670             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16671             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16672                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16673         } else {
16674                 tg3_flag_set(tp, WOL_SPEED_100MB);
16675         }
16676
16677         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16678                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16679
16680         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16681         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16682             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16683              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16684              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16685             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16686             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16687                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
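        /* Ethernet@WireSpeed is Broadcom's PHY downshift feature, which
         * keeps a link up at a lower speed over marginal cabling; the flag
         * above prevents it from being enabled on chips where it is
         * unreliable or unsupported.
         */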
16688
16689         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16690             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16691                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16692         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16693                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16694
16695         if (tg3_flag(tp, 5705_PLUS) &&
16696             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16697             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16698             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16699             !tg3_flag(tp, 57765_PLUS)) {
16700                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16701                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16702                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16703                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16704                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16705                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16706                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16707                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16708                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16709                 } else
16710                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16711         }
16712
16713         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16714             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16715                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16716                 if (tp->phy_otp == 0)
16717                         tp->phy_otp = TG3_OTP_DEFAULT;
16718         }
16719
16720         if (tg3_flag(tp, CPMU_PRESENT))
16721                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16722         else
16723                 tp->mi_mode = MAC_MI_MODE_BASE;
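        /* MAC_MI_MODE selects the MDIO management clock: a constant
         * ~500 kHz clock on CPMU-equipped chips, the core-clock-derived
         * default otherwise.
         */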
16724
16725         tp->coalesce_mode = 0;
16726         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16727             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16728                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16729
16730         /* Set these bits to enable the statistics workaround. */
16731         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16732             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16733             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16734             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16735                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16736                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16737         }
16738
16739         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16740             tg3_asic_rev(tp) == ASIC_REV_57780)
16741                 tg3_flag_set(tp, USE_PHYLIB);
16742
16743         err = tg3_mdio_init(tp);
16744         if (err)
16745                 return err;
16746
16747         /* Initialize data/descriptor byte/word swapping. */
16748         val = tr32(GRC_MODE);
16749         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16750             tg3_asic_rev(tp) == ASIC_REV_5762)
16751                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16752                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16753                         GRC_MODE_B2HRX_ENABLE |
16754                         GRC_MODE_HTX2B_ENABLE |
16755                         GRC_MODE_HOST_STACKUP);
16756         else
16757                 val &= GRC_MODE_HOST_STACKUP;
16758
16759         tw32(GRC_MODE, val | tp->grc_mode);
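        /* Only the swap/stackup bits read back above survive; everything
         * else in GRC_MODE comes from the tp->grc_mode value assembled
         * earlier.
         */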
16760
16761         tg3_switch_clocks(tp);
16762
16763         /* Clear this out for sanity. */
16764         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16765
16766         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16767         tw32(TG3PCI_REG_BASE_ADDR, 0);
16768
16769         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16770                               &pci_state_reg);
16771         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16772             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16773                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16774                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16775                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16776                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16777                         void __iomem *sram_base;
16778
16779                         /* Write some dummy words into the SRAM status block
16780                          * area and see if they read back correctly.  If the readback
16781                          * value is bad, force-enable the PCIX workaround.
16782                          */
16783                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16784
16785                         writel(0x00000000, sram_base);
16786                         writel(0x00000000, sram_base + 4);
16787                         writel(0xffffffff, sram_base + 4);
16788                         if (readl(sram_base) != 0x00000000)
16789                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16790                 }
16791         }
16792
16793         udelay(50);
16794         tg3_nvram_init(tp);
16795
16796         /* If the device has an NVRAM, no need to load patch firmware */
16797         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16798             !tg3_flag(tp, NO_NVRAM))
16799                 tp->fw_needed = NULL;
16800
16801         grc_misc_cfg = tr32(GRC_MISC_CFG);
16802         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16803
16804         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16805             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16806              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16807                 tg3_flag_set(tp, IS_5788);
16808
16809         if (!tg3_flag(tp, IS_5788) &&
16810             tg3_asic_rev(tp) != ASIC_REV_5700)
16811                 tg3_flag_set(tp, TAGGED_STATUS);
16812         if (tg3_flag(tp, TAGGED_STATUS)) {
16813                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16814                                       HOSTCC_MODE_CLRTICK_TXBD);
16815
16816                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16817                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16818                                        tp->misc_host_ctrl);
16819         }
16820
16821         /* Preserve the APE MAC_MODE bits */
16822         if (tg3_flag(tp, ENABLE_APE))
16823                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16824         else
16825                 tp->mac_mode = 0;
16826
16827         if (tg3_10_100_only_device(tp, ent))
16828                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16829
16830         err = tg3_phy_probe(tp);
16831         if (err) {
16832                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16833                 /* ... but do not return immediately ... */
16834                 tg3_mdio_fini(tp);
16835         }
16836
16837         tg3_read_vpd(tp);
16838         tg3_read_fw_ver(tp);
16839
16840         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16841                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16842         } else {
16843                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16844                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16845                 else
16846                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16847         }
16848
16849         /* 5700 {AX,BX} chips have a broken status block link
16850          * change bit implementation, so we must use the
16851          * status register in those cases.
16852          */
16853         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16854                 tg3_flag_set(tp, USE_LINKCHG_REG);
16855         else
16856                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16857
16858         /* The led_ctrl is set during tg3_phy_probe; here we might
16859          * have to force the link status polling mechanism based
16860          * upon subsystem IDs.
16861          */
16862         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16863             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16864             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16865                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16866                 tg3_flag_set(tp, USE_LINKCHG_REG);
16867         }
16868
16869         /* For all SERDES we poll the MAC status register. */
16870         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16871                 tg3_flag_set(tp, POLL_SERDES);
16872         else
16873                 tg3_flag_clear(tp, POLL_SERDES);
16874
16875         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16876                 tg3_flag_set(tp, POLL_CPMU_LINK);
16877
16878         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16879         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
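        /* The 5701 in PCI-X mode cannot DMA to 2-byte-aligned buffers, so
         * drop the NET_IP_ALIGN offset there; without efficient unaligned
         * access, every packet is then copied so the IP header ends up
         * aligned.
         */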
16880         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16881             tg3_flag(tp, PCIX_MODE)) {
16882                 tp->rx_offset = NET_SKB_PAD;
16883 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16884                 tp->rx_copy_thresh = ~(u16)0;
16885 #endif
16886         }
16887
16888         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16889         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16890         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16891
16892         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16893
16894         /* Increment the rx prod index on the rx std ring by at most
16895          * 8 for these chips to workaround hw errata.
16896          * 8 for these chips to work around hw errata.
16897         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16898             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16899             tg3_asic_rev(tp) == ASIC_REV_5755)
16900                 tp->rx_std_max_post = 8;
16901
16902         if (tg3_flag(tp, ASPM_WORKAROUND))
16903                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16904                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16905
16906         return err;
16907 }
16908
16909 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
16910 {
16911         u32 hi, lo, mac_offset;
16912         int addr_ok = 0;
16913         int err;
16914
16915         if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
16916                 return 0;
16917
16918         if (tg3_flag(tp, IS_SSB_CORE)) {
16919                 err = ssb_gige_get_macaddr(tp->pdev, addr);
16920                 if (!err && is_valid_ether_addr(addr))
16921                         return 0;
16922         }
16923
16924         mac_offset = 0x7c;
16925         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16926             tg3_flag(tp, 5780_CLASS)) {
16927                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16928                         mac_offset = 0xcc;
16929                 if (tg3_nvram_lock(tp))
16930                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16931                 else
16932                         tg3_nvram_unlock(tp);
16933         } else if (tg3_flag(tp, 5717_PLUS)) {
16934                 if (tp->pci_fn & 1)
16935                         mac_offset = 0xcc;
16936                 if (tp->pci_fn > 1)
16937                         mac_offset += 0x18c;
16938         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16939                 mac_offset = 0x10;
16940
16941         /* First try to get it from MAC address mailbox. */
16942         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
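        /* Bootcode stores the ASCII signature "HK" (0x484b) in the upper
         * 16 bits when the mailbox holds a valid MAC address.
         */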
16943         if ((hi >> 16) == 0x484b) {
16944                 addr[0] = (hi >>  8) & 0xff;
16945                 addr[1] = (hi >>  0) & 0xff;
16946
16947                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16948                 addr[2] = (lo >> 24) & 0xff;
16949                 addr[3] = (lo >> 16) & 0xff;
16950                 addr[4] = (lo >>  8) & 0xff;
16951                 addr[5] = (lo >>  0) & 0xff;
16952
16953                 /* Some old bootcode may report a 0 MAC address in SRAM */
16954                 addr_ok = is_valid_ether_addr(addr);
16955         }
16956         if (!addr_ok) {
16957                 /* Next, try NVRAM. */
16958                 if (!tg3_flag(tp, NO_NVRAM) &&
16959                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16960                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16961                         memcpy(&addr[0], ((char *)&hi) + 2, 2);
16962                         memcpy(&addr[2], (char *)&lo, sizeof(lo));
16963                 }
16964                 /* Finally just fetch it out of the MAC control regs. */
16965                 else {
16966                         hi = tr32(MAC_ADDR_0_HIGH);
16967                         lo = tr32(MAC_ADDR_0_LOW);
16968
16969                         addr[5] = lo & 0xff;
16970                         addr[4] = (lo >> 8) & 0xff;
16971                         addr[3] = (lo >> 16) & 0xff;
16972                         addr[2] = (lo >> 24) & 0xff;
16973                         addr[1] = hi & 0xff;
16974                         addr[0] = (hi >> 8) & 0xff;
16975                 }
16976         }
16977
16978         if (!is_valid_ether_addr(addr))
16979                 return -EINVAL;
16980         return 0;
16981 }
16982
16983 #define BOUNDARY_SINGLE_CACHELINE       1
16984 #define BOUNDARY_MULTI_CACHELINE        2
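/* DMA boundary "goals": stop bursts at every cache line boundary
 * (SINGLE), only at multiples of a cache line (MULTI), or not at all
 * (0) on architectures whose PCI hosts handle long bursts well.
 */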
16985
16986 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16987 {
16988         int cacheline_size;
16989         u8 byte;
16990         int goal;
16991
16992         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16993         if (byte == 0)
16994                 cacheline_size = 1024;
16995         else
16996                 cacheline_size = (int) byte * 4;
16997
16998         /* On 5703 and later chips, the boundary bits have no
16999          * effect.
17000          */
17001         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17002             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17003             !tg3_flag(tp, PCI_EXPRESS))
17004                 goto out;
17005
17006 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17007         goal = BOUNDARY_MULTI_CACHELINE;
17008 #else
17009 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17010         goal = BOUNDARY_SINGLE_CACHELINE;
17011 #else
17012         goal = 0;
17013 #endif
17014 #endif
17015
17016         if (tg3_flag(tp, 57765_PLUS)) {
17017                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17018                 goto out;
17019         }
17020
17021         if (!goal)
17022                 goto out;
17023
17024         /* PCI controllers on most RISC systems tend to disconnect
17025          * when a device tries to burst across a cache-line boundary.
17026          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17027          *
17028          * Unfortunately, for PCI-E there are only limited
17029          * write-side controls for this, and thus for reads
17030          * we will still get the disconnects.  We'll also waste
17031          * these PCI cycles for both read and write for chips
17032          * other than 5700 and 5701 which do not implement the
17033          * boundary bits.
17034          */
17035         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17036                 switch (cacheline_size) {
17037                 case 16:
17038                 case 32:
17039                 case 64:
17040                 case 128:
17041                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17042                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17043                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17044                         } else {
17045                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17046                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17047                         }
17048                         break;
17049
17050                 case 256:
17051                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17052                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17053                         break;
17054
17055                 default:
17056                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17057                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17058                         break;
17059                 }
17060         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17061                 switch (cacheline_size) {
17062                 case 16:
17063                 case 32:
17064                 case 64:
17065                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17066                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17067                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17068                                 break;
17069                         }
17070                         fallthrough;
17071                 case 128:
17072                 default:
17073                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17074                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17075                         break;
17076                 }
17077         } else {
17078                 switch (cacheline_size) {
17079                 case 16:
17080                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17081                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17082                                         DMA_RWCTRL_WRITE_BNDRY_16);
17083                                 break;
17084                         }
17085                         fallthrough;
17086                 case 32:
17087                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17088                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17089                                         DMA_RWCTRL_WRITE_BNDRY_32);
17090                                 break;
17091                         }
17092                         fallthrough;
17093                 case 64:
17094                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17095                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17096                                         DMA_RWCTRL_WRITE_BNDRY_64);
17097                                 break;
17098                         }
17099                         fallthrough;
17100                 case 128:
17101                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17102                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17103                                         DMA_RWCTRL_WRITE_BNDRY_128);
17104                                 break;
17105                         }
17106                         fallthrough;
17107                 case 256:
17108                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17109                                 DMA_RWCTRL_WRITE_BNDRY_256);
17110                         break;
17111                 case 512:
17112                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17113                                 DMA_RWCTRL_WRITE_BNDRY_512);
17114                         break;
17115                 case 1024:
17116                 default:
17117                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17118                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17119                         break;
17120                 }
17121         }
17122
17123 out:
17124         return val;
17125 }
17126
17127 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17128                            int size, bool to_device)
17129 {
17130         struct tg3_internal_buffer_desc test_desc;
17131         u32 sram_dma_descs;
17132         int i, ret;
17133
17134         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17135
17136         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17137         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17138         tw32(RDMAC_STATUS, 0);
17139         tw32(WDMAC_STATUS, 0);
17140
17141         tw32(BUFMGR_MODE, 0);
17142         tw32(FTQ_RESET, 0);
17143
17144         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17145         test_desc.addr_lo = buf_dma & 0xffffffff;
17146         test_desc.nic_mbuf = 0x00002100;
17147         test_desc.len = size;
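        /* The descriptor tells the DMA engine where the host buffer lives;
         * nic_mbuf appears to name the on-chip mbuf used as the staging
         * area for the transfer (0x2100 is an SRAM-local address).
         */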
17148
17149         /*
17150          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17151          * the *second* time the tg3 driver was loaded after an initial
17152          * scan.
17153          *
17154          * Broadcom tells me:
17155          *   ...the DMA engine is connected to the GRC block and a DMA
17156          *   reset may affect the GRC block in some unpredictable way...
17157          *   The behavior of resets to individual blocks has not been tested.
17158          *
17159          * Broadcom noted the GRC reset will also reset all sub-components.
17160          */
17161         if (to_device) {
17162                 test_desc.cqid_sqid = (13 << 8) | 2;
17163
17164                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17165                 udelay(40);
17166         } else {
17167                 test_desc.cqid_sqid = (16 << 8) | 7;
17168
17169                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17170                 udelay(40);
17171         }
17172         test_desc.flags = 0x00000005;
17173
17174         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17175                 u32 val;
17176
17177                 val = *(((u32 *)&test_desc) + i);
17178                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17179                                        sram_dma_descs + (i * sizeof(u32)));
17180                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17181         }
17182         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17183
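        /* The descriptor now sits in NIC SRAM; queue its address on the
         * appropriate DMA-high FTQ to start the transfer.
         */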
17184         if (to_device)
17185                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17186         else
17187                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17188
17189         ret = -ENODEV;
17190         for (i = 0; i < 40; i++) {
17191                 u32 val;
17192
17193                 if (to_device)
17194                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17195                 else
17196                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17197                 if ((val & 0xffff) == sram_dma_descs) {
17198                         ret = 0;
17199                         break;
17200                 }
17201
17202                 udelay(100);
17203         }
17204
17205         return ret;
17206 }
17207
17208 #define TEST_BUFFER_SIZE        0x2000
17209
17210 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17211         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17212         { },
17213 };
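/* Hosts known to need the 16-byte write boundary workaround even when
 * the DMA test below passes.
 */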
17214
17215 static int tg3_test_dma(struct tg3 *tp)
17216 {
17217         dma_addr_t buf_dma;
17218         u32 *buf, saved_dma_rwctrl;
17219         int ret = 0;
17220
17221         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17222                                  &buf_dma, GFP_KERNEL);
17223         if (!buf) {
17224                 ret = -ENOMEM;
17225                 goto out_nofree;
17226         }
17227
17228         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17229                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17230
17231         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17232
17233         if (tg3_flag(tp, 57765_PLUS))
17234                 goto out;
17235
17236         if (tg3_flag(tp, PCI_EXPRESS)) {
17237                 /* DMA read watermark not used on PCIE */
17238                 tp->dma_rwctrl |= 0x00180000;
17239         } else if (!tg3_flag(tp, PCIX_MODE)) {
17240                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17241                     tg3_asic_rev(tp) == ASIC_REV_5750)
17242                         tp->dma_rwctrl |= 0x003f0000;
17243                 else
17244                         tp->dma_rwctrl |= 0x003f000f;
17245         } else {
17246                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17247                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17248                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17249                         u32 read_water = 0x7;
17250
17251                         /* If the 5704 is behind the EPB bridge, we can
17252                          * do the less restrictive ONE_DMA workaround for
17253                          * better performance.
17254                          */
17255                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17256                             tg3_asic_rev(tp) == ASIC_REV_5704)
17257                                 tp->dma_rwctrl |= 0x8000;
17258                         else if (ccval == 0x6 || ccval == 0x7)
17259                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17260
17261                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17262                                 read_water = 4;
17263                         /* Set bit 23 to enable PCIX hw bug fix */
17264                         tp->dma_rwctrl |=
17265                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17266                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17267                                 (1 << 23);
17268                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17269                         /* 5780 always in PCIX mode */
17270                         tp->dma_rwctrl |= 0x00144000;
17271                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17272                         /* 5714 always in PCIX mode */
17273                         tp->dma_rwctrl |= 0x00148000;
17274                 } else {
17275                         tp->dma_rwctrl |= 0x001b000f;
17276                 }
17277         }
17278         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17279                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17280
17281         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17282             tg3_asic_rev(tp) == ASIC_REV_5704)
17283                 tp->dma_rwctrl &= 0xfffffff0;
17284
17285         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17286             tg3_asic_rev(tp) == ASIC_REV_5701) {
17287                 /* Remove this if it causes problems for some boards. */
17288                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17289
17290                 /* On 5700/5701 chips, we need to set this bit.
17291                  * Otherwise the chip will issue cacheline transactions
17292                  * to streamable DMA memory without all of the byte
17293                  * enables turned on.  This is an error on several
17294                  * RISC PCI controllers, in particular sparc64.
17295                  *
17296                  * On 5703/5704 chips, this bit has been reassigned
17297                  * a different meaning.  In particular, it is used
17298                  * on those chips to enable a PCI-X workaround.
17299                  */
17300                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17301         }
17302
17303         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17304
17305
17306         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17307             tg3_asic_rev(tp) != ASIC_REV_5701)
17308                 goto out;
17309
17310         /* It is best to perform the DMA test with the maximum write burst size
17311          * to expose the 5700/5701 write DMA bug.
17312          */
17313         saved_dma_rwctrl = tp->dma_rwctrl;
17314         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17315         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17316
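        /* Pattern test: fill the buffer, DMA it to the chip and back, then
         * verify.  On corruption, retry with the 16-byte write boundary
         * workaround before giving up.
         */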
17317         while (1) {
17318                 u32 *p = buf, i;
17319
17320                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17321                         p[i] = i;
17322
17323                 /* Send the buffer to the chip. */
17324                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17325                 if (ret) {
17326                         dev_err(&tp->pdev->dev,
17327                                 "%s: Buffer write failed. err = %d\n",
17328                                 __func__, ret);
17329                         break;
17330                 }
17331
17332                 /* Now read it back. */
17333                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17334                 if (ret) {
17335                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17336                                 "err = %d\n", __func__, ret);
17337                         break;
17338                 }
17339
17340                 /* Verify it. */
17341                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17342                         if (p[i] == i)
17343                                 continue;
17344
17345                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17346                             DMA_RWCTRL_WRITE_BNDRY_16) {
17347                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17348                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17349                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17350                                 break;
17351                         } else {
17352                                 dev_err(&tp->pdev->dev,
17353                                         "%s: Buffer corrupted on read back! "
17354                                         "(%d != %d)\n", __func__, p[i], i);
17355                                 ret = -ENODEV;
17356                                 goto out;
17357                         }
17358                 }
17359
17360                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17361                         /* Success. */
17362                         ret = 0;
17363                         break;
17364                 }
17365         }
17366         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17367             DMA_RWCTRL_WRITE_BNDRY_16) {
17368                 /* DMA test passed without adjusting the DMA boundary;
17369                  * now look for chipsets that are known to expose the
17370                  * DMA bug without failing the test.
17371                  */
17372                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17373                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17374                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17375                 } else {
17376                         /* Safe to use the calculated DMA boundary. */
17377                         tp->dma_rwctrl = saved_dma_rwctrl;
17378                 }
17379
17380                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17381         }
17382
17383 out:
17384         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17385 out_nofree:
17386         return ret;
17387 }
17388
17389 static void tg3_init_bufmgr_config(struct tg3 *tp)
17390 {
17391         if (tg3_flag(tp, 57765_PLUS)) {
17392                 tp->bufmgr_config.mbuf_read_dma_low_water =
17393                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17394                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17395                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17396                 tp->bufmgr_config.mbuf_high_water =
17397                         DEFAULT_MB_HIGH_WATER_57765;
17398
17399                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17400                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17401                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17402                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17403                 tp->bufmgr_config.mbuf_high_water_jumbo =
17404                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17405         } else if (tg3_flag(tp, 5705_PLUS)) {
17406                 tp->bufmgr_config.mbuf_read_dma_low_water =
17407                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17408                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17409                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17410                 tp->bufmgr_config.mbuf_high_water =
17411                         DEFAULT_MB_HIGH_WATER_5705;
17412                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17413                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17414                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17415                         tp->bufmgr_config.mbuf_high_water =
17416                                 DEFAULT_MB_HIGH_WATER_5906;
17417                 }
17418
17419                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17420                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17421                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17422                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17423                 tp->bufmgr_config.mbuf_high_water_jumbo =
17424                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17425         } else {
17426                 tp->bufmgr_config.mbuf_read_dma_low_water =
17427                         DEFAULT_MB_RDMA_LOW_WATER;
17428                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17429                         DEFAULT_MB_MACRX_LOW_WATER;
17430                 tp->bufmgr_config.mbuf_high_water =
17431                         DEFAULT_MB_HIGH_WATER;
17432
17433                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17434                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17435                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17436                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17437                 tp->bufmgr_config.mbuf_high_water_jumbo =
17438                         DEFAULT_MB_HIGH_WATER_JUMBO;
17439         }
17440
17441         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17442         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17443 }
17444
17445 static char *tg3_phy_string(struct tg3 *tp)
17446 {
17447         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17448         case TG3_PHY_ID_BCM5400:        return "5400";
17449         case TG3_PHY_ID_BCM5401:        return "5401";
17450         case TG3_PHY_ID_BCM5411:        return "5411";
17451         case TG3_PHY_ID_BCM5701:        return "5701";
17452         case TG3_PHY_ID_BCM5703:        return "5703";
17453         case TG3_PHY_ID_BCM5704:        return "5704";
17454         case TG3_PHY_ID_BCM5705:        return "5705";
17455         case TG3_PHY_ID_BCM5750:        return "5750";
17456         case TG3_PHY_ID_BCM5752:        return "5752";
17457         case TG3_PHY_ID_BCM5714:        return "5714";
17458         case TG3_PHY_ID_BCM5780:        return "5780";
17459         case TG3_PHY_ID_BCM5755:        return "5755";
17460         case TG3_PHY_ID_BCM5787:        return "5787";
17461         case TG3_PHY_ID_BCM5784:        return "5784";
17462         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17463         case TG3_PHY_ID_BCM5906:        return "5906";
17464         case TG3_PHY_ID_BCM5761:        return "5761";
17465         case TG3_PHY_ID_BCM5718C:       return "5718C";
17466         case TG3_PHY_ID_BCM5718S:       return "5718S";
17467         case TG3_PHY_ID_BCM57765:       return "57765";
17468         case TG3_PHY_ID_BCM5719C:       return "5719C";
17469         case TG3_PHY_ID_BCM5720C:       return "5720C";
17470         case TG3_PHY_ID_BCM5762:        return "5762C";
17471         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17472         case 0:                 return "serdes";
17473         default:                return "unknown";
17474         }
17475 }
17476
17477 static char *tg3_bus_string(struct tg3 *tp, char *str)
17478 {
17479         if (tg3_flag(tp, PCI_EXPRESS)) {
17480                 strcpy(str, "PCI Express");
17481                 return str;
17482         } else if (tg3_flag(tp, PCIX_MODE)) {
17483                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17484
17485                 strcpy(str, "PCIX:");
17486
17487                 if ((clock_ctrl == 7) ||
17488                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17489                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17490                         strcat(str, "133MHz");
17491                 else if (clock_ctrl == 0)
17492                         strcat(str, "33MHz");
17493                 else if (clock_ctrl == 2)
17494                         strcat(str, "50MHz");
17495                 else if (clock_ctrl == 4)
17496                         strcat(str, "66MHz");
17497                 else if (clock_ctrl == 6)
17498                         strcat(str, "100MHz");
17499         } else {
17500                 strcpy(str, "PCI:");
17501                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17502                         strcat(str, "66MHz");
17503                 else
17504                         strcat(str, "33MHz");
17505         }
17506         if (tg3_flag(tp, PCI_32BIT))
17507                 strcat(str, ":32-bit");
17508         else
17509                 strcat(str, ":64-bit");
17510         return str;
17511 }
17512
17513 static void tg3_init_coal(struct tg3 *tp)
17514 {
17515         struct ethtool_coalesce *ec = &tp->coal;
17516
17517         memset(ec, 0, sizeof(*ec));
17518         ec->cmd = ETHTOOL_GCOALESCE;
17519         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17520         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17521         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17522         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17523         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17524         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17525         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17526         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17527         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17528
17529         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17530                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17531                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17532                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17533                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17534                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17535         }
17536
17537         if (tg3_flag(tp, 5705_PLUS)) {
17538                 ec->rx_coalesce_usecs_irq = 0;
17539                 ec->tx_coalesce_usecs_irq = 0;
17540                 ec->stats_block_coalesce_usecs = 0;
17541         }
17542 }
17543
17544 static int tg3_init_one(struct pci_dev *pdev,
17545                                   const struct pci_device_id *ent)
17546 {
17547         struct net_device *dev;
17548         struct tg3 *tp;
17549         int i, err;
17550         u32 sndmbx, rcvmbx, intmbx;
17551         char str[40];
17552         u64 dma_mask, persist_dma_mask;
17553         netdev_features_t features = 0;
17554         u8 addr[ETH_ALEN] __aligned(2);
17555
17556         err = pci_enable_device(pdev);
17557         if (err) {
17558                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17559                 return err;
17560         }
17561
17562         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17563         if (err) {
17564                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17565                 goto err_out_disable_pdev;
17566         }
17567
17568         pci_set_master(pdev);
17569
17570         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17571         if (!dev) {
17572                 err = -ENOMEM;
17573                 goto err_out_free_res;
17574         }
17575
17576         SET_NETDEV_DEV(dev, &pdev->dev);
17577
17578         tp = netdev_priv(dev);
17579         tp->pdev = pdev;
17580         tp->dev = dev;
17581         tp->rx_mode = TG3_DEF_RX_MODE;
17582         tp->tx_mode = TG3_DEF_TX_MODE;
17583         tp->irq_sync = 1;
17584         tp->pcierr_recovery = false;
17585
17586         if (tg3_debug > 0)
17587                 tp->msg_enable = tg3_debug;
17588         else
17589                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17590
17591         if (pdev_is_ssb_gige_core(pdev)) {
17592                 tg3_flag_set(tp, IS_SSB_CORE);
17593                 if (ssb_gige_must_flush_posted_writes(pdev))
17594                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17595                 if (ssb_gige_one_dma_at_once(pdev))
17596                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17597                 if (ssb_gige_have_roboswitch(pdev)) {
17598                         tg3_flag_set(tp, USE_PHYLIB);
17599                         tg3_flag_set(tp, ROBOSWITCH);
17600                 }
17601                 if (ssb_gige_is_rgmii(pdev))
17602                         tg3_flag_set(tp, RGMII_MODE);
17603         }
17604
17605         /* The word/byte swap bits here control register access byte
17606          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17607          * setting below.
17608          */
17609         tp->misc_host_ctrl =
17610                 MISC_HOST_CTRL_MASK_PCI_INT |
17611                 MISC_HOST_CTRL_WORD_SWAP |
17612                 MISC_HOST_CTRL_INDIR_ACCESS |
17613                 MISC_HOST_CTRL_PCISTATE_RW;
17614
17615         /* The NONFRM (non-frame) byte/word swap controls take effect
17616          * on descriptor entries, anything which isn't packet data.
17617          *
17618          * The StrongARM chips on the board (one for tx, one for rx)
17619          * are running in big-endian mode.
17620          */
17621         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17622                         GRC_MODE_WSWAP_NONFRM_DATA);
17623 #ifdef __BIG_ENDIAN
17624         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17625 #endif
17626         spin_lock_init(&tp->lock);
17627         spin_lock_init(&tp->indirect_lock);
17628         INIT_WORK(&tp->reset_task, tg3_reset_task);
17629
17630         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17631         if (!tp->regs) {
17632                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17633                 err = -ENOMEM;
17634                 goto err_out_free_dev;
17635         }
17636
17637         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17638             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17639             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17640             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17641             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17642             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17643             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17644             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17645             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17646             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17647             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17648             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17649             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17650             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17651             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17652                 tg3_flag_set(tp, ENABLE_APE);
17653                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17654                 if (!tp->aperegs) {
17655                         dev_err(&pdev->dev,
17656                                 "Cannot map APE registers, aborting\n");
17657                         err = -ENOMEM;
17658                         goto err_out_iounmap;
17659                 }
17660         }
17661
17662         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17663         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17664
17665         dev->ethtool_ops = &tg3_ethtool_ops;
17666         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17667         dev->netdev_ops = &tg3_netdev_ops;
17668         dev->irq = pdev->irq;
17669
17670         err = tg3_get_invariants(tp, ent);
17671         if (err) {
17672                 dev_err(&pdev->dev,
17673                         "Problem fetching invariants of chip, aborting\n");
17674                 goto err_out_apeunmap;
17675         }
17676
17677         /* The EPB bridge inside 5714, 5715, and 5780 and any
17678          * device behind the EPB cannot support DMA addresses > 40-bit.
17679          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17680          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17681          * do DMA address check in tg3_start_xmit().
17682          */
17683         if (tg3_flag(tp, IS_5788))
17684                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17685         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17686                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17687 #ifdef CONFIG_HIGHMEM
17688                 dma_mask = DMA_BIT_MASK(64);
17689 #endif
17690         } else
17691                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17692
17693         /* Configure DMA attributes. */
17694         if (dma_mask > DMA_BIT_MASK(32)) {
17695                 err = dma_set_mask(&pdev->dev, dma_mask);
17696                 if (!err) {
17697                         features |= NETIF_F_HIGHDMA;
17698                         err = dma_set_coherent_mask(&pdev->dev,
17699                                                     persist_dma_mask);
17700                         if (err < 0) {
17701                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17702                                         "DMA for consistent allocations\n");
17703                                 goto err_out_apeunmap;
17704                         }
17705                 }
17706         }
17707         if (err || dma_mask == DMA_BIT_MASK(32)) {
17708                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17709                 if (err) {
17710                         dev_err(&pdev->dev,
17711                                 "No usable DMA configuration, aborting\n");
17712                         goto err_out_apeunmap;
17713                 }
17714         }
17715
17716         tg3_init_bufmgr_config(tp);
17717
17718         /* 5700 B0 chips do not support checksumming correctly due
17719          * to hardware bugs.
17720          */
17721         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17722                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17723
17724                 if (tg3_flag(tp, 5755_PLUS))
17725                         features |= NETIF_F_IPV6_CSUM;
17726         }
17727
17728         /* TSO is on by default on chips that support hardware TSO.
17729          * Firmware TSO on older chips gives lower performance, so it
17730          * is off by default, but can be enabled using ethtool.
17731          */
17732         if ((tg3_flag(tp, HW_TSO_1) ||
17733              tg3_flag(tp, HW_TSO_2) ||
17734              tg3_flag(tp, HW_TSO_3)) &&
17735             (features & NETIF_F_IP_CSUM))
17736                 features |= NETIF_F_TSO;
17737         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17738                 if (features & NETIF_F_IPV6_CSUM)
17739                         features |= NETIF_F_TSO6;
17740                 if (tg3_flag(tp, HW_TSO_3) ||
17741                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17742                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17743                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17744                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17745                     tg3_asic_rev(tp) == ASIC_REV_57780)
17746                         features |= NETIF_F_TSO_ECN;
17747         }
17748
17749         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17750                          NETIF_F_HW_VLAN_CTAG_RX;
17751         dev->vlan_features |= features;
17752
17753         /*
17754          * Add loopback capability only for a subset of devices that support
17755          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17756          * loopback for the remaining devices.
17757          */
17758         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17759             !tg3_flag(tp, CPMU_PRESENT))
17760                 /* Add the loopback capability */
17761                 features |= NETIF_F_LOOPBACK;
17762
17763         dev->hw_features |= features;
17764         dev->priv_flags |= IFF_UNICAST_FLT;
17765
17766         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17767         dev->min_mtu = TG3_MIN_MTU;
17768         dev->max_mtu = TG3_MAX_MTU(tp);
17769
17770         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17771             !tg3_flag(tp, TSO_CAPABLE) &&
17772             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17773                 tg3_flag_set(tp, MAX_RXPEND_64);
17774                 tp->rx_pending = 63;
17775         }
17776
17777         err = tg3_get_device_address(tp, addr);
17778         if (err) {
17779                 dev_err(&pdev->dev,
17780                         "Could not obtain valid ethernet address, aborting\n");
17781                 goto err_out_apeunmap;
17782         }
17783         eth_hw_addr_set(dev, addr);
17784
17785         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17786         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17787         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17788         for (i = 0; i < tp->irq_max; i++) {
17789                 struct tg3_napi *tnapi = &tp->napi[i];
17790
17791                 tnapi->tp = tp;
17792                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17793
17794                 tnapi->int_mbox = intmbx;
17795                 if (i <= 4)
17796                         intmbx += 0x8;
17797                 else
17798                         intmbx += 0x4;
17799
17800                 tnapi->consmbox = rcvmbx;
17801                 tnapi->prodmbox = sndmbx;
17802
17803                 if (i)
17804                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17805                 else
17806                         tnapi->coal_now = HOSTCC_MODE_NOW;
17807
17808                 if (!tg3_flag(tp, SUPPORT_MSIX))
17809                         break;
17810
17811                 /*
17812                  * If we support MSIX, we'll be using RSS.  If we're using
17813                  * RSS, the first vector only handles link interrupts and the
17814                  * remaining vectors handle rx and tx interrupts.  Reuse the
17815          * mailbox values for the next iteration.  The values we set up
17816                  * above are still useful for the single vectored mode.
17817                  */
17818                 if (!i)
17819                         continue;
17820
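                /* Each additional vector's rx return mailbox sits 8 bytes
                 * further on; the send producer mailboxes are interleaved,
                 * hence the alternating +0xc/-0x4 walk below.
                 */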
17821                 rcvmbx += 0x8;
17822
17823                 if (sndmbx & 0x4)
17824                         sndmbx -= 0x4;
17825                 else
17826                         sndmbx += 0xc;
17827         }
17828
17829         /*
17830          * Reset the chip in case a UNDI or EFI driver did not shut it
17831          * down cleanly.  The DMA self test will enable the WDMAC and we
17832          * would then see (spurious) pending DMA on the PCI bus.
17833          */
17834         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17835             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17836                 tg3_full_lock(tp, 0);
17837                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17838                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17839                 tg3_full_unlock(tp);
17840         }
17841
17842         err = tg3_test_dma(tp);
17843         if (err) {
17844                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17845                 goto err_out_apeunmap;
17846         }
17847
17848         tg3_init_coal(tp);
17849
17850         pci_set_drvdata(pdev, dev);
17851
        if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720 ||
            tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, PTP_CAPABLE);

        tg3_timer_init(tp);

        tg3_carrier_off(tp);

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_apeunmap;
        }

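        /*
         * ptp_clock_register() returns an ERR_PTR on failure; treating
         * that as a NULL clock disables PTP without failing the probe.
         */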
        if (tg3_flag(tp, PTP_CAPABLE)) {
                tg3_ptp_init(tp);
                tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
                                                   &tp->pdev->dev);
                if (IS_ERR(tp->ptp_clock))
                        tp->ptp_clock = NULL;
        }

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tg3_chip_rev_id(tp),
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d], EEE[%d])\n",
                            tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
                            (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (dev->features & NETIF_F_RXCSUM) != 0,
                    tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    tg3_flag(tp, ENABLE_ASF) != 0,
                    tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

        pci_save_state(pdev);

        return 0;

err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        if (pci_is_enabled(pdev))
                pci_disable_device(pdev);
        return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                tg3_ptp_fini(tp);

                release_firmware(tp->fw);

                tg3_reset_task_cancel(tp);

                if (tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }

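                /* Tear down in reverse order of tg3_init_one(). */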
                unregister_netdev(dev);
                if (tp->aperegs) {
                        iounmap(tp->aperegs);
                        tp->aperegs = NULL;
                }
                if (tp->regs) {
                        iounmap(tp->regs);
                        tp->regs = NULL;
                }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
        }
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
        struct net_device *dev = dev_get_drvdata(device);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        tg3_reset_task_cancel(tp);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

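        /*
         * If preparing for power-down fails, restart the hardware and
         * reattach the interface so the device is not left half-stopped.
         */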
        err = tg3_power_down_prepare(tp);
        if (err) {
                int err2;

                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, true);
                if (err2)
                        goto out;

                tg3_timer_start(tp);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                if (!err2)
                        tg3_phy_start(tp);
        }

unlock:
        rtnl_unlock();
        return err;
}

static int tg3_resume(struct device *device)
{
        struct net_device *dev = dev_get_drvdata(device);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

        tg3_flag_set(tp, INIT_COMPLETE);
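        /*
         * Reset the PHY on restart unless it was configured to keep the
         * link up across the power-down.
         */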
        err = tg3_restart_hw(tp,
                             !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
        if (err)
                goto out;

        tg3_timer_start(tp);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

unlock:
        rtnl_unlock();
        return err;
}
#endif /* CONFIG_PM_SLEEP */

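/*
 * SIMPLE_DEV_PM_OPS wires tg3_suspend/tg3_resume to all of the system
 * sleep callbacks (suspend, freeze, poweroff and their resume
 * counterparts); when CONFIG_PM_SLEEP is unset the macro leaves the
 * ops table empty, matching the #ifdef above.
 */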
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

static void tg3_shutdown(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);

        tg3_reset_task_cancel(tp);

        rtnl_lock();

        netif_device_detach(dev);

        if (netif_running(dev))
                dev_close(dev);

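        /*
         * Put the chip into a low-power state, leaving Wake-on-LAN
         * armed if it was enabled.
         */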
        tg3_power_down(tp);

        rtnl_unlock();

        pci_disable_device(pdev);
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        /* Make sure the reset task doesn't run while we tear down */
        tg3_reset_task_cancel(tp);

        rtnl_lock();

        /* This may be a second call, or the netdev may not exist yet */
        if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
                goto done;

        /*
         * There is no need to recover from a permanent failure; only a
         * frozen channel warrants a reset, so mark it for recovery.
         */
        if (state == pci_channel_io_frozen)
                tp->pcierr_recovery = true;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

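        /*
         * On a permanent failure, close the device and tell the AER
         * core to disconnect it; otherwise disable it and ask for a
         * slot reset (PCI_ERS_RESULT_NEED_RESET, set above).
         */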
done:
        if (state == pci_channel_io_perm_failure) {
                if (netdev) {
                        tg3_napi_enable(tp);
                        dev_close(netdev);
                }
                err = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_disable_device(pdev);
        }

        rtnl_unlock();

        return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

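        /*
         * Restore the config space captured by pci_save_state() at the
         * end of probe, then save it again so a later reset starts from
         * a clean copy.
         */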
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netdev || !netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
                tg3_napi_enable(tp);
                dev_close(netdev);
        }
        rtnl_unlock();

        return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netdev || !netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, true);
        if (err) {
                tg3_full_unlock(tp);
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tg3_timer_start(tp);

        tg3_netif_start(tp);

        tg3_full_unlock(tp);

        tg3_phy_start(tp);

done:
        tp->pcierr_recovery = false;
        rtnl_unlock();
}

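/*
 * AER recovery runs the three callbacks below in order: error_detected
 * stops the device and requests a reset, slot_reset re-initializes PCI
 * state after the bus reset, and resume restarts traffic.
 * tp->pcierr_recovery is set in error_detected (for a frozen channel)
 * and cleared in resume to bracket the whole sequence.
 */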
static const struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = tg3_remove_one,
        .err_handler    = &tg3_err_handler,
        .driver.pm      = &tg3_pm_ops,
        .shutdown       = tg3_shutdown,
};

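/*
 * module_pci_driver() expands to the module_init()/module_exit() pair
 * that registers and unregisters tg3_driver with the PCI core.
 */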
module_pci_driver(tg3_driver);